ngram
listlengths
0
82k
[ "\"\"\" Gathers values from the tensor `values` at locations `indices`.", "select '{device} because no device of this type is available.\"", "either. Args: values: tensor of shape (batch, spatial..., channel) indices:", "a tensor (nativer or otherwise) by a backend if it", "backend that must be implemented if the feature is supported.", "DType: return DType(complex, max(64, self.precision)) def combine_types(self, *dtypes: DType) ->", "self.stack([state.residual for state in states]) iterations = [state.iterations for state", "self.stack([self.to_float(state.x) for state in states]) residual = self.stack([state.residual for state", "len(backends) == 0: raise NoBackendFound(f\"No backend found for types {[type(v).__name__", "collections import namedtuple from contextlib import contextmanager from threading import", "= [\"\"] * batch_size f_inputs = [None] * batch_size f_b_losses", "of the tensor is known and can be read at", "backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`. Args:", "to float32 unless specified otherwise. The output of math operations", "unstack(self, tensor, axis=0, keepdims=False) -> tuple: if axis < 0:", "if isinstance(feature, str) else feature.__name__ if not hasattr(Backend, feature): raise", "-> Callable: return NotImplemented def functional_gradient(self, f, wrt: tuple or", "self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if isinstance(batches, int): batches =", "diverged[b], \"\") for b in range(batch_size)] trajectories = [t[:-1] +", "used for out-of-bounds points if mode='constant' (Default value = 0)", "__str__(self): return self.name def __repr__(self): return self.name def list_devices(self, device_type:", "A * x. 
2nd order tensor (batch, vector) or list", "'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ]) class ComputeDevice: \"\"\" A", "from a normal distribution with mean 0 and std 1.", "return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj) elif method", "'constant') Returns: padded tensor or NotImplemented \"\"\" raise NotImplementedError(self) def", "'converged', 'diverged', 'message', ]) class ComputeDevice: \"\"\" A physical device", "def imag(self, x): raise NotImplementedError(self) def real(self, x): raise NotImplementedError(self)", "Convolve value with kernel. Depending on the tensor rank, the", "f_output_available = Barrier(batch_size + 1) finished = [False] * batch_size", "type resulting from operations involving the tensors as input. This", "tolerance_sq, axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method, x, residual,", "in parallel y: target result of A * x. 2nd", "backend-compatible tensor Returns: NumPy representation of the values stored in", "it as a tensor argument. Args: x: object to check", "specified coordinates. 
Args: grid: Tensor spatial_dims: Dimension indices that correspond", "elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol,", "it_counter += 1 iterations += continue_1 dx_dy = self.sum(dx *", "x is a Python number (numbers.Number instance), `convert_numbers` decides whether", "= DType(int, 32)): raise NotImplementedError(self) def zeros(self, shape, dtype: DType", "# in-place subtraction affects convergence residual_squared = self.sum(residual ** 2,", "dy, residual, iterations, function_evaluations, converged, diverged)) return trajectory if trj", "choose_backend(*values, prefer_default=False) -> Backend: \"\"\" Selects a suitable backend to", "_PRECISION[0] = floating_point_bits def get_precision() -> int: \"\"\" Gets the", "values type resulting from operations involving the tensors as input.", "only_native=False): return False return True def _is_specific(backend, values): for value", "modification. If x is a Python number (numbers.Number instance), `convert_numbers`", "def __str__(self): return self.name def __repr__(self): return self.name def list_devices(self,", "'Backend': from phi.math.backend import BACKENDS for backend in BACKENDS: if", "with zeros so that the result has the same shape", "size (batch, parameters) rtol: Relative tolerance of size (batch,) atol:", "dimension 3 or higher Returns: \"\"\" raise NotImplementedError(self) def ifft(self,", "is None and dim2 is None: return None if dim1", "error if the value of the tensor is not known", "maximum(self, a, b): raise NotImplementedError(self) def minimum(self, a, b): raise", "of available operations. To support a compute library, subclass `Backend`", "e.g. `Backend.sparse_tensor` Returns: Whether the feature is supported. \"\"\" feature", "on `f` that uses a custom gradient for backprop. Args:", "converged, diverged _, _, x, _, _, residual, iterations, function_evaluations,", "device such as `'CPU'`, `'GPU'` or `'TPU'`. 
\"\"\" self.memory: int", "to floating point values with precision equal to the currently", "OptimizeResult) # res.nit, res.nfev xs[b] = res.x converged[b] = res.success", "tensor: Native tensor belonging to any registered backend. backend: Target", "must be either equal for both or one for either.", "bits, [1:] from 'with' blocks def choose_backend(*values, prefer_default=False) -> Backend:", "variant described in \"Methods of Conjugate Gradients for Solving Linear", "self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 # (batch, parameters) batch_size =", "necessary but ensures batch-independence x += step_size * dx #", "zip(trajectories, last_points)] trajectory = [] for states in zip(*trajectories): x", "None threads = [] for b in range(batch_size): def b_thread(b=b):", "add(self, a, b): a, b = self.auto_cast(a, b) return a", "res.message finished[b] = True while not all_finished: f_input_available.wait() f_output_available.wait() b_thread", "self.name in backend.name: return backend raise RuntimeError(f\"Backend '{self}' is not", "symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method = f\"Φ-Flow", "divisor def and_(self, a, b): a, b = self.auto_cast(a, b)", "to `backend`. \"\"\" backend = backend or default_backend() current_backend =", "a tensor to floating point values with precision equal to", "\"\"\" dtypes = [self.dtype(t) for t in tensors] result_type =", "(nnz, vector) \"\"\" raise NotImplementedError(self) def mean(self, value, axis=None, keepdims=False):", "a compute library, subclass `Backend` and register it by adding", "x, residual, iterations, function_evaluations, converged, diverged, \"\")) x = self.copy(x)", "operating on `Tensor` objects to delegate the actual computations. 
Args:", "raise NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self) def scatter(self, base_grid,", "float, complex, bool): tensors = [self.cast(t, result_type) for t in", "Returns: `Backend` or `None` \"\"\" return _DEFAULT[-1] if len(_DEFAULT) >", "0) mode: str: (Default value = 'constant') Returns: padded tensor", "= self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description, x, residual, iterations,", "\"\")) res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]},", "yield None finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend = None,", "If called outside a backend context, returns `None`. Returns: `Backend`", "def size(self, array): return self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if", "axis >= len(tensor.shape) or axis < 0: raise ValueError(\"Illegal axis", "function_evaluations, converged, diverged, messages) def linear_solve(self, method: str, lin, y,", "x += step_size * dx if it_counter % 50 ==", "check \"\"\" raise NotImplementedError(self) def not_equal(self, x, y): return ~self.equal(x,", "precision in bits, [1:] from 'with' blocks def choose_backend(*values, prefer_default=False)", "without modification. If x is a Python number (numbers.Number instance),", "False, \"\")) res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter':", "selected to perform backend computations. \"\"\" def __init__(self, backend: 'Backend',", "library. Else, intermediately converts `tensor` to a NumPy array. *Warning*:", "Usage: `with precision(p):` This overrides the global setting, see `set_global_precision()`.", "representation of the value. Tensors are typically available when the", "= False trajectories = [[] for _ in range(batch_size)] if", "normal distribution with mean 0 and std 1. 
\"\"\" raise", "self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj else SolveResult(method, x, residual,", "diverged, \"\")] if trj else None continue_ = ~converged &", "not backend.is_tensor(value, only_native=False): return False return True def _is_specific(backend, values):", "this point. If true, `numpy(tensor)` must return a valid NumPy", "ceil(self, x): raise NotImplementedError(self) def floor(self, x): raise NotImplementedError(self) def", "2, -1, keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual * dy,", "and `use_dlpack=True`, uses zero-copy conversion using the DLPack library. Else,", "= max([len(t) for t in trajectories]) last_points = [SolveResult(method_description, xs[b],", "cores or GPU multiprocessors. -1 for n/a. \"\"\" self.description: str", "raise NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self, start,", "[] def b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x,", "bool): raise NotImplementedError() def transpose(self, tensor, axes): raise NotImplementedError() def", "DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) -> list: \"\"\"", "step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary", "<NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive ({self.name})\" y =", "[backend for backend in BACKENDS if _is_applicable(backend, values)] if len(backends)", "than `value`, these dimensions are added to `value` as outer", "for _ in range(batch_size)] if trj else None threads =", "y) def greater_than(self, x, y): x, y = self.auto_cast(x, y)", "def xor(self, a, b): a, b = self.auto_cast(a, b) return", "current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) #", 
"`Backend.linear_solve()`. \"\"\" # Based on \"An Introduction to the Conjugate", "in values]}; registered backends are {BACKENDS}\") # --- Native tensors?", "def log10(self, x): raise NotImplementedError(self) def dtype(self, array) -> DType:", "will be of the corresponding data type, float16, float32 or", "`multiples` has more dimensions than `value`, these dimensions are added", "along batch, must have the same nonzero locations. * linear", "axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy)", "x): \"\"\" Computes the n-dimensional FFT along all but the", "of linear equations A · x = y. This method", "state) iterations[b] += 1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] =", "random_uniform(self, shape): \"\"\" Float tensor of selected precision containing random", "native and thus, will be converted by this method. Args:", "This operation does not support backpropagation. Args: loop: Loop function,", "`NoBackendFound` error, else returns `None`. Returns: the selected `Backend` \"\"\"", "tensor of selected precision containing random values sampled from a", "self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True) method_description = f\"SciPy {method}", "1) # 0=success messages[b] = res.message finished[b] = True while", "iterations, function_evaluations, converged, diverged, \"\")] if trj else None finished", "Python numbers. *Note:* There may be objects that are considered", "jit_compile(self, f: Callable) -> Callable: return NotImplemented def functional_gradient(self, f,", "bytes). -1 for n/a. 
\"\"\" self.processor_count: int = processor_count \"\"\"", "_diverged): continue_1 = self.to_int32(continue_) it_counter += 1 iterations += continue_1", "floating point values with precision equal to the currently set", "-> tuple: raise NotImplementedError(self) def record_gradients(self, xs: tuple or list,", "set globally using `set_global_precision()` or locally using `with precision(p):`. Any", "type clashes like int32 vs int64. (Default value = True)", "Backend has priority --- if _is_applicable(_DEFAULT[-1], values) and (prefer_default or", "\"\"\" Sets the floating point precision for the local context.", "only spatial dimensions Returns: non-zero multi-indices as tensor of shape", "else None) for i in range(self.ndims(value))) return value[slices] def sum(self,", "if axis >= len(tensor.shape) or axis < 0: raise ValueError(\"Illegal", "<NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive ({self.name})\"", "affects convergence residual_squared_old = residual_squared residual_squared = self.sum(residual ** 2,", "is returned without modification. This method raises an error if", "(batch,) max_iter: Maximum number of iterations of size (batch,) trj:", "~self.equal(x, y) def greater_than(self, x, y): x, y = self.auto_cast(x,", "--- for backend in backends: if _is_specific(backend, values): return backend", "backend set by the inner-most surrounding `with backend:` block. If", "RuntimeError(f\"Backend '{self}' is not visible.\") @property def complex_type(self) -> DType:", "\"\"\" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. \"\"\"", "a, b = self.auto_cast(a, b) return a & b def", "32)): raise NotImplementedError(self) def zeros(self, shape, dtype: DType = None):", "trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")]", "must be a matrix but got shape {lin_shape}\" return self.matmul(lin,", "in a graph. 
Use `is_available(tensor)` to check if the value", "Returns a NumPy representation of the given tensor. If `tensor`", "*coordinates): raise NotImplementedError(self) def linspace(self, start, stop, number): raise NotImplementedError(self)", "matrix) values: param shape: shape: Returns: \"\"\" raise NotImplementedError(self) def", "[state.function_evaluations for state in states] converged = [state.converged for state", "-> Backend: \"\"\" The default backend is preferred by `choose_backend()`.", "PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type:", "supported, returns NotImplemented. Args: value: tensor pad_width: 2D tensor specifying", "x): return self.reshape(x, (-1,)) def std(self, x, axis=None, keepdims=False): raise", "(numbers.Number instance), `convert_numbers` decides whether to convert it unless the", "= ~converged & ~diverged & (iterations < max_iter) return continue_,", "no backend can handle the given values. If True, raises", "Convert a Tensor to the native format of `backend`. If", "*= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but", "See `ComputeDevice.device_type`. Returns: `list` of all currently available devices. 
\"\"\"", "while ~self.all(finished): it_counter += 1; iterations += not_finished_1 dy =", "convert(tensor, backend: Backend = None, use_dlpack=True): \"\"\" Convert a Tensor", "to record and return the optimization trajectory as a `List[SolveResult]`.", "if len(_DEFAULT) > 1 else None def set_global_default_backend(backend: Backend): \"\"\"", "if i in axes else None) for i in range(self.ndims(value)))", "1, channels or 1) mode: One of ('update', 'add') Returns:", "dy # in-place subtraction affects convergence residual_squared = self.sum(residual **", "lin, vector): if callable(lin): return lin(vector) elif isinstance(lin, (tuple, list)):", "linear(self, lin, vector): if callable(lin): return lin(vector) elif isinstance(lin, (tuple,", "y=None): raise NotImplementedError(self) def nonzero(self, values): \"\"\" Args: values: Tensor", "if the value of the tensor is known and can", "information about the device such as driver version. \"\"\" self.ref", "impl_fun = getattr(self.__class__, feature) return impl_fun is not backend_fun def", "is not known at this point, e.g. because it represents", "tuple or list): \"\"\" Multiply-sum-reduce a_axes of a with b_axes", "-1)) return trajectory if trj else SolveResult(method, x, residual, iterations,", "return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) else: raise", "comprehensive list of available operations. To support a compute library,", "use for coordinates outside the grid. One of `('undefined', 'zeros',", "x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged): continue_1 =", "values from the tensor `values` at locations `indices`. The first", "the profiler. Usage: choose_backend(key).call(custom_function, *args) \"\"\" return f(*args) def block_until_ready(self,", "is understood by this backend, this method returns the number", "completion. 
\"\"\" raise NotImplementedError(self) def abs(self, x): raise NotImplementedError(self) def", "Depending on the tensor rank, the convolution is either 1D", "spatial..., channel) indices: int tensor of shape (batch, any..., multi_index)", "upon loop completion. \"\"\" raise NotImplementedError(self) def abs(self, x): raise", "+= 1 else: residual = residual - step_size * dy", "< max_iter) def loop(continue_, it_counter, x, dx, dy, residual, iterations,", "self.to_int32(continue_) it_counter += 1 iterations += continue_1 dx_dy = self.sum(dx", "blocks _PRECISION = [32] # [0] = global precision in", "precision in bits. The precision can be set globally using", "Returns: Convolution result as tensor of shape (batch_size, out_channel, spatial...)", "= self.sum(residual ** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,))", "trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, \"\")) res =", "last dimensions. Args: x: tensor of dimension 3 or higher", "a, axis=0, number=1): raise NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self)", "str, f, x0, atol, max_iter, trj: bool): from scipy.optimize import", "method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`.", "def cos(self, x): raise NotImplementedError(self) def tan(self, x): raise NotImplementedError(self)", "of DYNAMIC_BACKEND which affects all registered backends. If `floating_point_bits` is", "variables upon loop completion. \"\"\" raise NotImplementedError(self) def abs(self, x):", "or 1) mode: One of ('update', 'add') Returns: Copy of", "\"\"\" raise NotImplementedError(self) def coordinates(self, tensor): \"\"\" Returns the coordinates", "it is not a struct (e.g. 
tuple, list) and all", "\"\"\" Computes the n-dimensional inverse FFT along all but the", "for types {[type(v).__name__ for v in values]}; registered backends are", "b, b_axes: tuple or list): \"\"\" Multiply-sum-reduce a_axes of a", "\"\"\" return f(*args) def block_until_ready(self, values): pass def jit_compile(self, f:", "stack(self, values, axis=0): raise NotImplementedError(self) def concat(self, values, axis): raise", "GPU multiprocessors. -1 for n/a. \"\"\" self.description: str = description", "considered a native tensor by a backend if no internal", "tensors def __str__(self): return self.name def __repr__(self): return self.name def", "in eager mode. Args: tensor: backend-compatible tensor Returns: bool \"\"\"", "max(64, self.precision)) def combine_types(self, *dtypes: DType) -> DType: return combine_types(*dtypes,", "0: raise ValueError(\"Illegal axis value\") result = [] for slice_idx", "if axis < 0: axis += len(tensor.shape) if axis >=", "feature): raise ValueError(f\"Not a valid feature: '{feature}'\") backend_fun = getattr(Backend,", "object is considered a tensor (nativer or otherwise) by a", "def log(self, x): \"\"\" Natural logarithm \"\"\" raise NotImplementedError(self) def", "[state.converged for state in states] diverged = [state.diverged for state", "because it represents a node in a graph. Use `is_available(tensor)`", "= [0] * batch_size xs = [None] * batch_size final_losses", "dx, dx_dy) dy = self.linear(lin, dx); function_evaluations += continue_1 diverged", "regular grid at the specified coordinates. Args: grid: Tensor spatial_dims:", "else: component = tensor[tuple([slice_idx if d == axis else slice(None)", "values)] if len(backends) == 0: raise NoBackendFound(f\"No backend found for", "step_size * dx # if it_counter % 50 == 0:", "y=0. Args: x: y: Returns: \"\"\" raise NotImplementedError(self) def where(self,", "also convert floating point values to this precision, even if", "inner-most surrounding `with backend:` block. 
If called outside a backend", "axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is", "def log2(self, x): raise NotImplementedError(self) def log10(self, x): raise NotImplementedError(self)", "really necessary but ensures batch-independence x += step_size * dx", "== 'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)", "prefers_channels_last(self) -> bool: raise NotImplementedError() @property def precision(self) -> int:", "mask: 1D mask tensor axis: Axis index >= 0 \"\"\"", "{mem} | {pro} | {descr}\" class Backend: def __init__(self, name:", "recent_b_losses.clear() final_losses[b] = loss if trajectories is not None: trajectories[b].append(SolveResult(method_description,", "y def add(self, a, b): a, b = self.auto_cast(a, b)", "raise NotImplementedError(f\"Method '{method}' not supported for linear solve.\") def conjugate_gradient(self,", "NumPy array. Args: tensor: backend-compatible tensor Returns: NumPy representation of", "+= 1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if", "Pain\" by <NAME> # symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r,", "is used by most math functions operating on `Tensor` objects", "assert isinstance(backend, Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits: int): \"\"\"", "dx) iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int,", "dy, residual, iterations, function_evaluations, converged, diverged _, _, x, _,", "def random_normal(self, shape): \"\"\" Float tensor of selected precision containing", "with similar signature and return values as `f`. 
However, the", "= self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is not None:", "by this backend but are not native and thus, will", "is None or dim1 == 1: return dim2 if dim2", "a, b): raise NotImplementedError(self) def clip(self, x, minimum, maximum): raise", "float64. Operations may also convert floating point values to this", "device that can be selected to perform backend computations. \"\"\"", "iterations[b], function_evaluations[b], converged[b], diverged[b], \"\") for b in range(batch_size)] trajectories", "self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual,", "object is considered a native tensor by a backend if", "dimensions (pair for matrix) values: param shape: shape: Returns: \"\"\"", "min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if trajectories is not None:", "target result of A * x. 2nd order tensor (batch,", "which scatter values are inserted at indices. Tensor of shape", "getattr(Backend, feature) impl_fun = getattr(self.__class__, feature) return impl_fun is not", "axis else slice(None) for d in range(len(tensor.shape))])] result.append(component) return tuple(result)", "numerator, denominator = self.auto_cast(numerator, denominator) return numerator / denominator def", "int): \"\"\" Sets the floating point precision of DYNAMIC_BACKEND which", "a Tensor to the native format of `backend`. If the", "axis=0, keepdims=False) -> tuple: if axis < 0: axis +=", "y) return x > y def greater_or_equal(self, x, y): x,", "NumPy array, it is returned without modification. This method raises", "32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 =", "return False # Other low-level helper functions def combined_dim(dim1, dim2,", "library, subclass `Backend` and register it by adding it to", "a backend if it is not a struct (e.g. 
tuple,", "be objects that are considered tensors by this backend but", "the current default backend, see `default_backend()`. Returns: Tensor belonging to", "= feature if isinstance(feature, str) else feature.__name__ if not hasattr(Backend,", "axis += len(tensor.shape) if axis >= len(tensor.shape) or axis <", "it can handle handle the values, see `default_backend()`. raise_error: Determines", "default `Backend` \"\"\" return _DEFAULT[-1] def context_backend() -> Backend or", "function_evaluations += 1 else: residual = residual - step_size *", "tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) # res.nit, res.nfev", "function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b])", "Args: tensor: Native tensor belonging to any registered backend. backend:", "= getattr(self.__class__, feature) return impl_fun is not backend_fun def prefers_channels_last(self)", "64 for double \"\"\" _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1)", "extrapolation: Values to use for coordinates outside the grid. One", "if method == 'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol,", "default to float32 unless specified otherwise. The output of math", "tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1),", "override this method to prevent unnecessary casting. Args: *tensors: tensors", "precision containing random values sampled from a normal distribution with", "with only spatial dimensions Returns: non-zero multi-indices as tensor of", "axes. mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used for", "using `set_global_default_backend()` and locally using `with backend:`. Returns: current default", "leave non-float tensors untouched, use `Backend.as_tensor()`. Args: x: tensor of", "precision(p):`. 
Any Backend method may convert floating point values to", "diverged, messages) def linear_solve(self, method: str, lin, y, x0, rtol,", "tensor \"\"\" raise NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError() def", "Any Backend method may convert floating point values to this", "and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray =", "lin_i in lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape) == 2", "`Backend` form a comprehensive list of available operations. To support", "equations A · x = y. This method need not", "has priority --- if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1],", "order tensor (batch, vector) or list of vectors. x0: Initial", "\"\"\" self.memory: int = memory \"\"\" Maximum memory of the", "this method. Args: x: tensor-like, e.g. list, tuple, Python number,", "tensors? --- for backend in backends: if _is_specific(backend, values): return", "\"\"\" Determins the appropriate values type resulting from operations involving", "or emulate them. The methods of `Backend` form a comprehensive", "multi_index is values.rank - 2. Returns: Gathered values as tensor", "the input had a different precision. If `floating_point_bits` is None,", "return a | b def xor(self, a, b): a, b", "\"\"\" self.ref = ref \"\"\" (Optional) Reference to the internal", "see `set_global_precision()`. 
Args: floating_point_bits: 16 for half, 32 for single,", "x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol", "residual = y - self.linear(lin, x); function_evaluations += 1 else:", "matrix but got shape {lin_shape}\" return self.matmul(lin, vector) def gradients(self,", "while True: f_input_available.wait() if all(finished): all_finished = True f_output_available.wait() break", "Tensor of shape (batch_size or 1, update_count, index_vector) values: Values", "or 1, out_channel, in_channel, spatial...) zero_padding: If True, pads the", "None continue_ = ~converged & ~diverged & (iterations < max_iter)", "of a with b_axes of b. \"\"\" raise NotImplementedError(self) def", "'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) elif", "Returns: current default `Backend` \"\"\" return _DEFAULT[-1] def context_backend() ->", "adding it to the list. \"\"\" _DEFAULT = [] #", "this precision, even if the input had a different precision.", "self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures", "NotImplementedError(self) def conv(self, value, kernel, zero_padding=True): \"\"\" Convolve value with", "@property def as_registered(self) -> 'Backend': from phi.math.backend import BACKENDS for", "= [32] # [0] = global precision in bits, [1:]", "One of `('auto', 'CG', 'CG-adaptive')`. lin: Linear operation. One of", "mask tensor axis: Axis index >= 0 \"\"\" raise NotImplementedError(self)", "return a `tuple` with entries equal to `values` in shape", "axis value\") result = [] for slice_idx in range(tensor.shape[axis]): if", "+ b def sub(self, a, b): a, b = self.auto_cast(a,", "== 1: return dim1 assert dim1 == dim2, f\"Incompatible {type_str}", "conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. \"\"\" # Based", "this backend, this method returns the number as-is. This can", "or unbound Backend method, e.g. 
`Backend.sparse_tensor` Returns: Whether the feature", "== 0: # Not traceable since Python bool # residual", "mul(self, a, b): a, b = self.auto_cast(a, b) return a", "| b def xor(self, a, b): a, b = self.auto_cast(a,", "a, b): raise NotImplementedError(self) def minimum(self, a, b): raise NotImplementedError(self)", "and_(self, a, b): a, b = self.auto_cast(a, b) return a", "\"\"\" Computes the n-dimensional FFT along all but the first", "Returns: \"\"\" raise NotImplementedError(self) def ifft(self, k): \"\"\" Computes the", "and locally using `with backend:`. Returns: current default `Backend` \"\"\"", "b): a, b = self.auto_cast(a, b) return a - b", "dim2, type_str: str = 'batch'): if dim1 is None and", "floating point values to this precision, even if the input", "of size (batch, parameters) rtol: Relative tolerance of size (batch,)", "({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size =", "Computes x/y but returns 0 if y=0. Args: x: y:", "trj else None continue_ = ~converged & ~diverged & (iterations", "[self.cast(t, result_type) for t in tensors] return tensors def __str__(self):", "pad_width, mode: str = 'constant', constant_values=0): \"\"\" Pad a tensor", "values.rank - 2. 
Returns: Gathered values as tensor of shape", "lin(vector) elif isinstance(lin, (tuple, list)): for lin_i in lin: lin_shape", "batches): if isinstance(batches, int): batches = [batches] return tensor[batches, ...]", "= self.auto_cast(a, b) return a * b def div(self, numerator,", "def __init__(self, backend: 'Backend', name: str, device_type: str, memory: int,", "DType(int, 32)): raise NotImplementedError(self) def zeros(self, shape, dtype: DType =", "def matmul(self, A, b): raise NotImplementedError(self) def einsum(self, equation, *tensors):", "residual, iterations, function_evaluations, converged, diverged _, _, x, _, _,", "else None finished = converged | diverged | (iterations >=", "= [backend for backend in BACKENDS if _is_applicable(backend, values)] if", "use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule)", "in the tensor \"\"\" raise NotImplementedError() def to_dlpack(self, tensor): raise", "def custom_gradient(self, f: Callable, gradient: Callable) -> Callable: \"\"\" Creates", "backend assuming it can handle handle the values, see `default_backend()`.", "Not traceable since Python bool # residual = y -", "method returns the number as-is. This can help prevent type", "value, pad_width, mode: str = 'constant', constant_values=0): \"\"\" Pad a", "shape): \"\"\" Optional features. Args: indices: tuple/list matching the dimensions", "is a Python number that is understood by this backend,", "x): \"\"\" Converts a tensor to floating point values with", "/ denominator def pow(self, base, exp): base, exp = self.auto_cast(base,", "1) f_output_available = Barrier(batch_size + 1) finished = [False] *", "slice_idx + 1) if d == axis else slice(None) for", "backend computations. \"\"\" def __init__(self, backend: 'Backend', name: str, device_type:", "of shape (batch_size, out_channel, spatial...) 
\"\"\" raise NotImplementedError(self) def expand_dims(self,", "handle Python numbers. *Note:* There may be objects that are", "numbers or others that are also supported as tensors (Default", "cast(self, x, dtype: DType): raise NotImplementedError(self) def to_float(self, x): \"\"\"", "= ref \"\"\" (Optional) Reference to the internal device representation.", "blocks def choose_backend(*values, prefer_default=False) -> Backend: \"\"\" Selects a suitable", "and (prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # --- Filter", "list of available operations. To support a compute library, subclass", "raise NotImplementedError(self) def max(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def", "residual, iterations, function_evaluations, converged, diverged =\\ self.while_loop(loop, (continue_, 0, x,", "self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)", "isinstance(device, str): devices = self.list_devices(device) assert len(devices) >= 1, f\"{self.name}:", "all_finished = False trajectories = [[] for _ in range(batch_size)]", "NotImplementedError(f\"Method '{method}' not supported for linear solve.\") def conjugate_gradient(self, lin,", "are added to `value` as outer dimensions. Args: value: tensor", "not in (0, 1) # 0=success messages[b] = res.message finished[b]", "a, b = self.auto_cast(a, b) return a ^ b def", "to the native format of `backend`. If the target backend", "keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) #", "def floordiv(self, a, b): a, b = self.auto_cast(a, b) return", "iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32))", "precision, even if the input had a different precision. Returns:", "registered backend. backend: Target backend. 
If `None`, uses the current", "x, axis=None, keepdims=False): raise NotImplementedError(self) def min(self, x, axis=None, keepdims=False):", "\"\"\" Convert a Tensor to the native format of `backend`.", "both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using", "Tensors are typically available when the backend operates in eager", "given tensor. If `tensor` is already a NumPy array, it", "\"\"\" raise NotImplementedError(self) def flatten(self, x): return self.reshape(x, (-1,)) def", "* dy # in-place subtraction affects convergence residual_squared_old = residual_squared", "dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really", "method: str, f, x0, atol, max_iter, trj: bool): from scipy.optimize", "axis=None): raise NotImplementedError(self) def divide_no_nan(self, x, y): \"\"\" Computes x/y", "dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method = f\"Φ-Flow CG ({self.name})\"", "for either. Args: values: tensor of shape (batch, spatial..., channel)", "If `x` is mutable and of the correct floating type,", "NotImplementedError(self) def where(self, condition, x=None, y=None): raise NotImplementedError(self) def nonzero(self,", "`trj`. \"\"\" if method == 'auto': return self.conjugate_gradient_adaptive(lin, y, x0,", "& b def or_(self, a, b): a, b = self.auto_cast(a,", "@property def precision(self) -> int: \"\"\" Short for math.backend.get_precision() \"\"\"", "coordinates: `tuple` of tensor holding the coordinate vectors, i.e. (row,", "same nonzero locations. 
* linear function A(x), must be called", "[None] * batch_size converged = [False] * batch_size diverged =", "def clip(self, x, minimum, maximum): raise NotImplementedError(self) def sqrt(self, x):", "(pair for matrix) values: param shape: shape: Returns: \"\"\" raise", "edges of each axis in the form [[axis 0 lower,", "tuple(result) def equal(self, x, y): \"\"\" Element-wise equality check \"\"\"", "= [False] * batch_size diverged = [False] * batch_size messages", "the corresponding data type, float16, float32 or float64. Operations may", "is not really necessary but ensures batch-independence x += step_size", "function_evaluations, converged, diverged, \"\")) x = self.copy(x) iterations = self.copy(iterations)", "return tensors def __str__(self): return self.name def __repr__(self): return self.name", "component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis", "`f(*args)` and returns the result. This method may be used", "argument. Args: x: object to check only_native: If True, only", "helper functions def combined_dim(dim1, dim2, type_str: str = 'batch'): if", "Tensor of shape (batch_size, spatial..., channels) indices: Tensor of shape", "Whether the feature is supported. \"\"\" feature = feature if", "* dx, dx_dy) dy = self.linear(lin, dx); function_evaluations += continue_1", "not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) while", "[0], get_output=True) method_description = f\"SciPy {method} with {self.name}\" iterations =", "f_b_losses = None f_b_losses_np = None f_grad_np = None f_input_available", "exp def mod(self, dividend, divisor): dividend, divisor = self.auto_cast(dividend, divisor)", "1) for t, last_point in zip(trajectories, last_points)] trajectory = []", "[False] * batch_size diverged = [False] * batch_size messages =", "= [self.dtype(t) for t in tensors] result_type = self.combine_types(*dtypes) if", "available devices. 
\"\"\" raise NotImplementedError() def get_default_device(self) -> ComputeDevice: return", "f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in", "from 'with' blocks _PRECISION = [32] # [0] = global", "of `x` as float tensor \"\"\" return self.cast(x, self.float_type) def", "default backend, see `default_backend()`. Returns: Tensor belonging to `backend`. \"\"\"", "64)) def to_complex(self, x): return self.cast(x, DType(complex, max(64, min(self.precision *", "values): return backend return backends[0] class NoBackendFound(Exception): \"\"\" Thrown by", "varying matrices along batch, must have the same nonzero locations.", "method to prevent unnecessary casting. Args: *tensors: tensors to cast", "for out-of-bounds points if mode='constant' (Default value = 0) mode:", "Converts a tensor to floating point values with precision equal", "tensors as input. This method is called by the default", "locations `indices`. The first dimension of `values` and `indices` is", "xs[b] = res.x converged[b] = res.success diverged[b] = res.status not", "-> SolveResult or List[SolveResult]: \"\"\" Solve the system of linear", "trj) else: raise NotImplementedError(f\"Method '{method}' not supported for linear solve.\")", "Based on \"An Introduction to the Conjugate Gradient Method Without", "if trajectory is not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations,", "str, device_type: str, memory: int, processor_count: int, description: str, ref=None):", "Systems\" by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow", "or axis < 0: raise ValueError(\"Illegal axis value\") result =", "parallel y: target result of A * x. 
2nd order", "slices = tuple(slice(None, None, -1 if i in axes else", "2D tensor specifying the number of values padded to the", "The precision can be set globally using `set_global_precision()` or locally", "determining the common data type Returns: tensors cast to a", "tensor-like, e.g. list, tuple, Python number, tensor convert_external: if False", "pro = f\"{self.processor_count} processors\" if self.processor_count > 0 else \"processors:", "supports(self, feature: str or Callable) -> bool: \"\"\" Tests if", "shape (batch_size, out_channel, spatial...) \"\"\" raise NotImplementedError(self) def expand_dims(self, a,", "a NumPy array. *Warning*: This operation breaks the automatic differentiation", ">= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1,", "return NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor)) def size(self, array):", "cast and to consider when determining the common data type", "tensor specifying the number of values padded to the edges", "a tensor argument. Args: x: object to check only_native: If", "or List[SolveResult]: \"\"\" Conjugate gradient algorithm with adaptive step size.", "`tensor`. If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy", "will be converted by this method. Args: x: tensor-like, e.g.", "* dx diverged = self.any(residual_squared / rsq0 > 100, axis=(1,))", "function_evaluations, converged, diverged, messages)) return trajectory else: x = self.stack(xs)", "(rank=4) or 3D (rank=5). Higher dimensions may not be supported.", "and `constant_values`. If the mode is not supported, returns NotImplemented.", "of `('auto', 'CG', 'CG-adaptive')`. lin: Linear operation. One of *", "res.status not in (0, 1) # 0=success messages[b] = res.message", "Values to use for coordinates outside the grid. 
One of", "else slice(None) for d in range(len(tensor.shape))])] result.append(component) return tuple(result) def", "len(descr) > 30: descr = descr[:28] + \"...\" return f\"'{self.name}'", "self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method,", "NotImplementedError() def as_tensor(self, x, convert_external=True): \"\"\" Converts a tensor-like object", "half, 32 for single, 64 for double \"\"\" _PRECISION.append(floating_point_bits) try:", "dimensions. Args: x: tensor of dimension 3 or higher Returns:", "shape (nnz, vector) \"\"\" raise NotImplementedError(self) def mean(self, value, axis=None,", "= y - self.linear(lin, x) dy = self.linear(lin, dx) iterations", "functions def combined_dim(dim1, dim2, type_str: str = 'batch'): if dim1", "dy # in-place subtraction affects convergence residual_squared_old = residual_squared residual_squared", "set_global_default_backend(backend: Backend): \"\"\" Sets the given backend as default. This", "operation. One of * sparse/dense matrix valid for all instances", "supported as tensors (Default value = False) Returns: bool: whether", "residual = residual - step_size * dy # in-place subtraction", "conversion using the DLPack library. Else, intermediately converts `tensor` to", "Returns: result: `SolveResult` or `List[SolveResult]`, depending on `trj`. \"\"\" if", "on the tensor rank, the convolution is either 1D (rank=3),", "x: tensor with any number of dimensions mask: 1D mask", "= self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol", "combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) -> list: \"\"\" Determins the", "dtype(self, array) -> DType: raise NotImplementedError(self) def tile(self, value, multiples):", "= rsq0 = self.sum(residual ** 2, -1, keepdims=True) diverged =", "y): x, y = self.auto_cast(x, y) return x > y", "is not backend_fun def prefers_channels_last(self) -> bool: raise NotImplementedError() @property", "vectors. 
x0: Initial guess of size (batch, parameters) rtol: Relative", "NotImplementedError(self) def scatter(self, base_grid, indices, values, mode: str): \"\"\" Depending", "even if the input had a different precision. If `floating_point_bits`", "= res.x converged[b] = res.success diverged[b] = res.status not in", "64 for double \"\"\" return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int):", "name=None): \"\"\" Calls `f(*args)` and returns the result. This method", "supported for linear solve.\") def conjugate_gradient(self, lin, y, x0, rtol,", "def __init__(self, name: str, default_device: ComputeDevice): \"\"\" Backends delegate low-level", "with different objects. \"\"\" def __repr__(self): mem = f\"{(self.memory /", "return True def _is_specific(backend, values): for value in values: if", "with mean 0 and std 1. \"\"\" raise NotImplementedError(self) def", "function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 = self.sum(residual", "backend.as_tensor(nparray) # Backend choice utility functions def _is_applicable(backend, values): for", "while any(values[0]): values = loop(*values) return values ``` This operation", "not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, \"\"))", "len(lin_shape) == 2 return self.stack([self.matmul(m, v) for m, v in", "backprop. 
Will be called as `gradient(*d_out)` to compute the gradient", "x0 dx = residual = y - self.linear(lin, x) it_counter", "SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\") def conjugate_gradient_adaptive(self,", "0: # Not traceable since Python bool # residual =", "{pro} | {descr}\" class Backend: def __init__(self, name: str, default_device:", "name(self) -> str: return self._name def supports(self, feature: str or", "x0[b], f_b_losses[b], 0, 1, False, False, \"\")) return f_b_losses_np[b], f_grad_np[b]", "device: ComputeDevice or str): if isinstance(device, str): devices = self.list_devices(device)", "returns the number as-is. This can help prevent type clashes", "= [None] * batch_size converged = [False] * batch_size diverged", "base_grid, indices, values, mode: str): \"\"\" Depending on `mode`, performs", "-> bool: \"\"\" Tests if this backend supports the given", "the given values. If True, raises a `NoBackendFound` error, else", "to_int64(self, x): return self.cast(x, DType(int, 64)) def to_complex(self, x): return", "dimensions may not be supported. Args: value: tensor of shape", "format of `backend`. If the target backend can operate natively", "same shape as `value`. Returns: Convolution result as tensor of", "or `List[SolveResult]`, depending on `trj`. \"\"\" if method == 'auto':", "default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1)", "in backend.name: return backend raise RuntimeError(f\"Backend '{self}' is not visible.\")", "that uses a custom gradient for backprop. Args: f: Forward", "to the internal device representation. 
\"\"\" self.backend: 'Backend' = backend", "def copy(self, tensor, only_mutable=False): raise NotImplementedError() def call(self, f: Callable,", "If `tensor` is already a NumPy array, it is returned", "** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged =", "current default backend, see `default_backend()`. Returns: Tensor belonging to `backend`.", "values. If True, raises a `NoBackendFound` error, else returns `None`.", "tensor. Args: tensor: Sparse tensor Returns: coordinates: `tuple` of tensor", "rank, the convolution is either 1D (rank=3), 2D (rank=4) or", "zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert len(lin_shape) == 2,", "decides whether to convert it unless the backend cannot handle", "extrapolation='constant'): \"\"\" Interpolates a regular grid at the specified coordinates.", "= loop(*values) return values ``` This operation does not support", "This setting can be overridden using `with backend:`. See `default_backend()`,", "# 0=success messages[b] = res.message finished[b] = True while not", "at indices. Tensor of shape (batch_size, spatial..., channels) indices: Tensor", "lies at position 0, the last at values.shape[i]-1. extrapolation: Values", "if _is_specific(backend, values): return backend return backends[0] class NoBackendFound(Exception): \"\"\"", "loss if trajectories is not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b],", "`backend`. If the target backend can operate natively on `tensor`,", "x): raise NotImplementedError(self) def scatter(self, base_grid, indices, values, mode: str):", "native format of `backend`. 
If the target backend can operate", "default backend can be set globally using `set_global_default_backend()` and locally", "value = 0) mode: str: (Default value = 'constant') Returns:", "Tensor with only spatial dimensions Returns: non-zero multi-indices as tensor", "features: * `sparse_tensor` * `gradients Args: feature: `str` or unbound", "def sin(self, x): raise NotImplementedError(self) def cos(self, x): raise NotImplementedError(self)", "* batch_size xs = [None] * batch_size final_losses = [None]", "a, b): a, b = self.auto_cast(a, b) return a -", "the floating point precision for the local context. Usage: `with", "\"\"\" self.processor_count: int = processor_count \"\"\" Number of CPU cores", "raise NotImplementedError() def random_uniform(self, shape): \"\"\" Float tensor of selected", "a, b): a, b = self.auto_cast(a, b) return a *", "_, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np =", "return self.name def list_devices(self, device_type: str or None = None)", "and return the optimization trajectory as a `List[SolveResult]`. Returns: result:", "def context_backend() -> Backend or None: \"\"\" Returns the backend", "in values: if not backend.is_tensor(value, only_native=False): return False return True", "the operation. Args: method: Which algorithm to use. One of", "[SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], \"\") for b", "backend if no internal conversion is required by backend methods.", "description: str, ref=None): self.name: str = name \"\"\" Name of", "it_counter % 50 == 0: residual = y - self.linear(lin,", "registered backends are {BACKENDS}\") # --- Native tensors? 
--- for", "value of the tensor is not known at this point,", "1; iterations += not_finished_1 dy = self.linear(lin, dx); function_evaluations +=", "out-of-bounds points if mode='constant' (Default value = 0) mode: str:", "type_str: str = 'batch'): if dim1 is None and dim2", "converged, diverged, \"\")) x = self.copy(x) iterations = self.copy(iterations) continue_", "the given values. \"\"\" def __init__(self, msg): Exception.__init__(self, msg) def", "native tensor representation of this backend. If x is a", "tensor representation of `x` \"\"\" raise NotImplementedError() def is_available(self, tensor)", "def transpose(self, tensor, axes): raise NotImplementedError() def random_uniform(self, shape): \"\"\"", "profiler. Usage: choose_backend(key).call(custom_function, *args) \"\"\" return f(*args) def block_until_ready(self, values):", "NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self) def floor(self, x): raise", "= name self._default_device = default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self,", "raise NotImplementedError(self) def expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self) def", "x): raise NotImplementedError(self) def cos(self, x): raise NotImplementedError(self) def tan(self,", "t in trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b],", "def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter,", "number (numbers.Number instance), `convert_numbers` decides whether to convert it unless", "def sparse_tensor(self, indices, values, shape): \"\"\" Optional features. Args: indices:", "tensor (batch, vector) or list of vectors. 
x0: Initial guess", "rtol, atol, max_iter, trj) elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin,", "the variant described in \"Methods of Conjugate Gradients for Solving", "return lin(vector) elif isinstance(lin, (tuple, list)): for lin_i in lin:", "Tensor to the native format of `backend`. If the target", "\"\"\" raise NotImplementedError(self) def minimize(self, method: str, f, x0, atol,", "Agonizing Pain\" by <NAME> # symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta,", "dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1),", "\"\"\" Convolve value with kernel. Depending on the tensor rank,", "b def or_(self, a, b): a, b = self.auto_cast(a, b)", "backend-compatible tensor Returns: bool \"\"\" raise NotImplementedError() def numpy(self, tensor)", "representation of the given tensor. If `tensor` is already a", "specified by `mode` and `constant_values`. If the mode is not", "return value[slices] def sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def", "version. \"\"\" self.ref = ref \"\"\" (Optional) Reference to the", "axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self, a, b): raise NotImplementedError(self)", "values, indices): \"\"\" Gathers values from the tensor `values` at", "dtype: DType = None): raise NotImplementedError(self) def ones_like(self, tensor): raise", "raise NotImplementedError(self) def abs(self, x): raise NotImplementedError(self) def sign(self, x):", "batch and component axes. 
mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect'", "NotImplementedError(self) def flatten(self, x): return self.reshape(x, (-1,)) def std(self, x,", "raise NotImplementedError(self) def minimum(self, a, b): raise NotImplementedError(self) def clip(self,", "raise NotImplementedError() def copy(self, tensor, only_mutable=False): raise NotImplementedError() def call(self,", "(iterations < max_iter) return continue_, it_counter, x, dx, dy, residual,", "setting can be overridden using `with backend:`. See `default_backend()`, `choose_backend()`.", "[ 'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ])", "# symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method =", "or 1, update_count or 1, channels or 1) mode: One", "+ [last_point] * (max_trajectory_length - len(t) + 1) for t,", "types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}\")", "function_evaluations += continue_1 diverged = self.any(residual_squared / rsq0 > 100,", "tensor pad_width: 2D tensor specifying the number of values padded", "Returns: Copy of base_grid with values at `indices` updated by", "elif isinstance(lin, (tuple, list)): for lin_i in lin: lin_shape =", "same device with different objects. \"\"\" def __repr__(self): mem =", "of times given by multiples. If `multiples` has more dimensions", "keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not", "to `BACKENDS`. 
Args: name: Human-readable string default_device: `ComputeDevice` being used", "def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple", "+= continue_1 diverged = self.any(residual_squared / rsq0 > 100, axis=(1,))", "return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))]) else:", "by this backend \"\"\" raise NotImplementedError() def as_tensor(self, x, convert_external=True):", "that is understood by this backend, this method returns the", "of shape (batch, spatial..., channel) indices: int tensor of shape", "- self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy) dy", "batch_gather(self, tensor, batches): if isinstance(batches, int): batches = [batches] return", "data type. values: Initial values of loop variables. Returns: Loop", "for state in states] diverged = [state.diverged for state in", "data type Returns: tensors cast to a common data type", "variable(self, value): return NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor)) def", "exp = self.auto_cast(base, exp) return base ** exp def mod(self,", "raise NotImplementedError() def is_tensor(self, x, only_native=False): \"\"\" An object is", "for t in tensors] result_type = self.combine_types(*dtypes) if result_type.kind in", "prefer_default: if True, selects the default backend assuming it can", "Args: *tensors: tensors to cast and to consider when determining", "= self.to_int32(continue_) it_counter += 1 iterations += continue_1 dx_dy =", "\"\"\" raise NotImplementedError(self) def random_normal(self, shape): \"\"\" Float tensor of", "of shape (batch, any..., channel) \"\"\" raise NotImplementedError(self) def flatten(self,", "NotImplementedError(self) def tensordot(self, a, a_axes: tuple or list, b, b_axes:", "raise NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self) def staticshape(self, tensor):", "= [None] * batch_size f_b_losses = None f_b_losses_np = None", "at locations `indices`. 
The first dimension of `values` and `indices`", "if final_losses[b] is None: # first evaluation final_losses[b] = f_b_losses[b]", "vector) or list of vectors. x0: Initial guess of size", "else: raise NotImplementedError(f\"Method '{method}' not supported for linear solve.\") def", "* b def div(self, numerator, denominator): numerator, denominator = self.auto_cast(numerator,", "equal for both or one for either. Args: values: tensor", "precision(floating_point_bits: int): \"\"\" Sets the floating point precision for the", "from_dlpack(self, capsule): raise NotImplementedError() def copy(self, tensor, only_mutable=False): raise NotImplementedError()", "or `'TPU'`. \"\"\" self.memory: int = memory \"\"\" Maximum memory", "to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError() def", "trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")", "One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. Returns: sampled", "self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not", "DType: raise NotImplementedError(self) def tile(self, value, multiples): \"\"\" Repeats the", "2, f\"A must be a matrix but got shape {lin_shape}\"", "Initial values of loop variables. Returns: Loop variables upon loop", "`use_dlpack=True`, uses zero-copy conversion using the DLPack library. Else, intermediately", "minimize(self, method: str, f, x0, atol, max_iter, trj: bool): from", "return self._name def supports(self, feature: str or Callable) -> bool:", "not supported for linear solve.\") def conjugate_gradient(self, lin, y, x0,", "but returns 0 if y=0. Args: x: y: Returns: \"\"\"", "e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of all", "raise NotImplementedError(self) def exp(self, x): raise NotImplementedError(self) def conv(self, value,", "to use. 
One of `('auto', 'CG', 'CG-adaptive')`. lin: Linear operation.", "* PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args:", "typically available when the backend operates in eager mode. Args:", "and `indices` is the batch dimension which must be either", "can be overridden using `with backend:`. See `default_backend()`, `choose_backend()`. Args:", "= [False] * batch_size messages = [\"\"] * batch_size f_inputs", "None) for i in range(self.ndims(value))) return value[slices] def sum(self, value,", "x, loss, iterations[b], function_evaluations[b], False, False, \"\")) res = minimize(fun=b_fun,", "of vectors. x0: Initial guess of size (batch, parameters) rtol:", "+= not_finished_1 dy = self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy", "for slice_idx in range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx", "compute devices this backend can use. Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count)", "the form [[axis 0 lower, axis 0 upper], ...] 
including", "to the backend precision but leave non-float tensors untouched, use", "or NotImplemented \"\"\" raise NotImplementedError(self) def reshape(self, value, shape): raise", "= Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if all(finished): all_finished", "ComputeDevice: \"\"\" A physical device that can be selected to", "\"\"\" def __repr__(self): mem = f\"{(self.memory / 1024 ** 2)}", "has more dimensions than `value`, these dimensions are added to", "dimensions mask: 1D mask tensor axis: Axis index >= 0", "random values in the range [0, 1) \"\"\" raise NotImplementedError(self)", "values: if backend.is_tensor(value, only_native=True): return True return False # Other", "performs scatter_update or scatter_add. Args: base_grid: Tensor into which scatter", "16 for half, 32 for single, 64 for double \"\"\"", "diverged, \"\")] if trj else None finished = converged |", "Introduction to the Conjugate Gradient Method Without the Agonizing Pain\"", "algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. \"\"\"", "True, only accepts true native tensor representations, not Python numbers", "raise NotImplementedError(self) def concat(self, values, axis): raise NotImplementedError(self) def pad(self,", "def shape(self, tensor): raise NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self)", "iterations, function_evaluations, converged, diverged, \"\") def linear(self, lin, vector): if", "e.g. list, tuple, Python number, tensor convert_external: if False and", "[] for b in range(batch_size): def b_thread(b=b): recent_b_losses = []", "NotImplementedError(self) def fft(self, x): \"\"\" Computes the n-dimensional FFT along", "# --- Default Backend has priority --- if _is_applicable(_DEFAULT[-1], values)", "if the value can be represented as a NumPy array.", "for the local context. 
Usage: `with precision(p):` This overrides the", "If `multiples` has more dimensions than `value`, these dimensions are", "by `choose_backend` if no backend can handle the given values.", "in_channel, spatial...) kernel: tensor of shape (batch_size or 1, out_channel,", "converged[b] = res.success diverged[b] = res.status not in (0, 1)", "globally using `set_global_precision()` or locally using `with precision(p):`. Any Backend", "i.e. (row, col) for matrices. indices: Tensor holding the corresponding", "def supports(self, feature: str or Callable) -> bool: \"\"\" Tests", "for matrices. indices: Tensor holding the corresponding values \"\"\" raise", "def max(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def min(self, x,", "threading import Barrier from typing import List, Callable import numpy", "implemented if the feature is supported. Possible features: * `sparse_tensor`", "values sampled from a normal distribution with mean 0 and", "of selected precision containing random values sampled from a normal", "def einsum(self, equation, *tensors): raise NotImplementedError(self) def while_loop(self, loop: Callable,", "[] for states in zip(*trajectories): x = self.stack([self.to_float(state.x) for state", "DYNAMIC_BACKEND which affects all registered backends. If `floating_point_bits` is an", "\"memory: n/a\" pro = f\"{self.processor_count} processors\" if self.processor_count > 0", "dy, axis=-1, keepdims=True) * dx, dx_dy) dy = self.linear(lin, dx);", "dx, dy, residual, iterations, function_evaluations, converged, diverged)) return trajectory if", "of the device that can be allocated (in bytes). -1", "--- Native tensors? --- for backend in backends: if _is_specific(backend,", "not supported, returns NotImplemented. Args: value: tensor pad_width: 2D tensor", "batch dimension which must be either equal for both or", "type. values: Initial values of loop variables. Returns: Loop variables", "[batches] return tensor[batches, ...] 
def unstack(self, tensor, axis=0, keepdims=False) ->", "raise ValueError(f\"Not a valid feature: '{feature}'\") backend_fun = getattr(Backend, feature)", "by this backend, this method returns the number as-is. This", "similar signature and return values as `f`. However, the returned", "def concat(self, values, axis): raise NotImplementedError(self) def pad(self, value, pad_width,", "def gradients(self, y, xs: tuple or list, grad_y) -> tuple:", "x, residual, iterations, function_evaluations, converged, diverged, \"\")] if trj else", "if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged,", "in range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self, x, y): \"\"\"", "x: y: Returns: \"\"\" raise NotImplementedError(self) def where(self, condition, x=None,", "base, exp): base, exp = self.auto_cast(base, exp) return base **", "method is called by the default implementations of basic operators.", "descr = self.description.replace('\\n', ' ') if len(descr) > 30: descr", "tensor of shape (nnz, vector) \"\"\" raise NotImplementedError(self) def mean(self,", "i lies at position 0, the last at values.shape[i]-1. 
extrapolation:", "half, 32 for single, 64 for double \"\"\" return _PRECISION[-1]", "1: return dim1 assert dim1 == dim2, f\"Incompatible {type_str} dimensions:", "self.precision)) def combine_types(self, *dtypes: DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision)", "def to_complex(self, x): return self.cast(x, DType(complex, max(64, min(self.precision * 2,", "\"\"\" raise NotImplementedError(self) def imag(self, x): raise NotImplementedError(self) def real(self,", "converged, diverged, \"\")) x = self.copy(x) iterations = self.copy(iterations) finished", "tensor to floating point values with precision equal to the", "= None, use_dlpack=True): \"\"\" Convert a Tensor to the native", "callback(x, *args): # L-BFGS-B only passes x but the documentation", "shape): \"\"\" Float tensor of selected precision containing random values", "return the optimization trajectory as a `List[SolveResult]`. Returns: result: `SolveResult`", "raise NotImplementedError(self) def coordinates(self, tensor): \"\"\" Returns the coordinates and", "def ifft(self, k): \"\"\" Computes the n-dimensional inverse FFT along", "def sub(self, a, b): a, b = self.auto_cast(a, b) return", "most math functions operating on `Tensor` objects to delegate the", "mode is not supported, returns NotImplemented. Args: value: tensor pad_width:", "the values stored in the tensor \"\"\" raise NotImplementedError() def", "is a native tensor of this backend, it is returned", "i in range(self.ndims(value))) return value[slices] def sum(self, value, axis=None, keepdims=False):", "default \"\"\" assert isinstance(backend, Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits:", "value): raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'):", "allocated (in bytes). -1 for n/a. 
\"\"\" self.processor_count: int =", "raise NotImplementedError(self) def linspace(self, start, stop, number): raise NotImplementedError(self) def", "\"\"\" if method == 'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol,", "+= 1; iterations += not_finished_1 dy = self.linear(lin, dx); function_evaluations", "resulting from operations involving the tensors as input. This method", "converged, diverged, messages) def linear_solve(self, method: str, lin, y, x0,", "of * sparse/dense matrix valid for all instances * tuple/list", "# this is not really necessary but ensures batch-independence x", "choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or backend is current_backend: return", "8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is", "adaptive step size. Signature matches to `Backend.linear_solve()`. \"\"\" # Based", "indices, values, mode: str): \"\"\" Depending on `mode`, performs scatter_update", "gradient of `f`. Returns: Function with similar signature and return", "~diverged & (iterations < max_iter) return continue_, it_counter, x, dx,", "correspond to coordinate vectors coordinates: Tensor of floating grid indices.", "List[SolveResult]: \"\"\" Conjugate gradient algorithm with adaptive step size. Signature", "coordinates, extrapolation='constant'): \"\"\" Interpolates a regular grid at the specified", "the same precision as its inputs. Args: floating_point_bits: one of", "different precision. If `floating_point_bits` is None, new tensors will default", "value: tensor of shape (batch_size, in_channel, spatial...) kernel: tensor of", "`Backend` and register it by adding it to `BACKENDS`. 
Args:", "may be objects that are considered tensors by this backend", "staticshape(self, tensor): raise NotImplementedError(self) def cast(self, x, dtype: DType): raise", "atol, max_iter, trj: bool): from scipy.optimize import OptimizeResult, minimize from", "= fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for", "value = True) Returns: tensor representation of `x` \"\"\" raise", "be represented as a NumPy array. Args: tensor: backend-compatible tensor", "optimization trajectory as a `List[SolveResult]`. Returns: result: `SolveResult` or `List[SolveResult]`,", "considered a tensor by this backend \"\"\" raise NotImplementedError() def", "raise NotImplementedError(self) def matmul(self, A, b): raise NotImplementedError(self) def einsum(self,", "self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2,", "> 0 else \"processors: n/a\" descr = self.description.replace('\\n', ' ')", "= [state.diverged for state in states] trajectory.append(SolveResult(method_description, x, residual, iterations,", "lin_shape = self.staticshape(lin_i) assert len(lin_shape) == 2 return self.stack([self.matmul(m, v)", "& ~diverged & (iterations < max_iter) def loop(continue_, it_counter, x,", "using `set_global_precision()` or locally using `with precision(p):`. Any Backend method", "= self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is", "a, b): a, b = self.auto_cast(a, b) return a |", "in values: if backend.is_tensor(value, only_native=True): return True return False #", "to `Backend.linear_solve()`. \"\"\" # Based on \"An Introduction to the", "{[type(v).__name__ for v in values]}; registered backends are {BACKENDS}\") #", "(row, col) for matrices. 
indices: Tensor holding the corresponding values", "method may be used to register internal calls with the", "= y - self.linear(lin, x); function_evaluations += 1 else: residual", "msg) def default_backend() -> Backend: \"\"\" The default backend is", "Python bool # residual = y - self.linear(lin, x); function_evaluations", "max(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def min(self, x, axis=None,", "batch_size f_b_losses = None f_b_losses_np = None f_grad_np = None", "; active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter += 1;", "be implemented if the feature is supported. Possible features: *", "as tensor of shape (batch, any..., channel) \"\"\" raise NotImplementedError(self)", "this is not really necessary but ensures batch-independence x +=", "`ComputeDevice.device_type`. Returns: `list` of all currently available devices. \"\"\" raise", "by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive", "_DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self)", "tensor of selected precision containing random values in the range", "keepdims=False): raise NotImplementedError(self) def fft(self, x): \"\"\" Computes the n-dimensional", "self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if", "tuple: if axis < 0: axis += len(tensor.shape) if axis", "if mode='constant' (Default value = 0) mode: str: (Default value", "& (iterations < max_iter) return continue_, it_counter, x, dx, dy,", "must return a `tuple` with entries equal to `values` in", "result = [] for slice_idx in range(tensor.shape[axis]): if keepdims: component", "Args: value: tensor multiples: tuple or list of integers Returns:", "self.processor_count > 0 else \"processors: n/a\" descr = self.description.replace('\\n', '", "DType): raise NotImplementedError(self) def 
to_float(self, x): \"\"\" Converts a tensor", "- self.linear(lin, x); function_evaluations += 1 # else: residual =", "axis: Axis index >= 0 \"\"\" raise NotImplementedError(self) def isfinite(self,", "value in values: if backend.is_tensor(value, only_native=True): return True return False", "tensor `values` at locations `indices`. The first dimension of `values`", "This overrides the global setting, see `set_global_precision()`. Args: floating_point_bits: 16", "the tensor is not known at this point, e.g. because", "last_points)] trajectory = [] for states in zip(*trajectories): x =", "converged, diverged, messages)) return trajectory else: x = self.stack(xs) residual", "feature = feature if isinstance(feature, str) else feature.__name__ if not", "x. 2nd order tensor (batch, vector) or list of vectors.", "NotImplementedError() def is_available(self, tensor) -> bool: \"\"\" Tests if the", "of size (batch,) atol: Absolute tolerance of size (batch,) max_iter:", "vectors coordinates: Tensor of floating grid indices. The last dimension", "axis): raise NotImplementedError(self) def pad(self, value, pad_width, mode: str =", "NotImplemented def variable(self, value): return NotImplemented def ndims(self, tensor): return", "self.auto_cast(a, b) return a + b def sub(self, a, b):", "*values: prefer_default: if True, selects the default backend assuming it", "the floating point precision of DYNAMIC_BACKEND which affects all registered", "True def _is_specific(backend, values): for value in values: if backend.is_tensor(value,", "= 'constant', constant_values=0): \"\"\" Pad a tensor with values as", "tensor): raise NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self) def cast(self,", "Returns: padded tensor or NotImplemented \"\"\" raise NotImplementedError(self) def reshape(self,", "Backend or None: \"\"\" Returns the backend set by the", "precision, even if the input had a different precision. If", "backend supports the given feature. 
Features correspond to a method", "mode: str: (Default value = 'constant') Returns: padded tensor or", "if dim1 is None and dim2 is None: return None", "dim1 assert dim1 == dim2, f\"Incompatible {type_str} dimensions: x0 {dim1},", "number): raise NotImplementedError(self) def tensordot(self, a, a_axes: tuple or list,", "for t in tensors] return tensors def __str__(self): return self.name", "iterations, function_evaluations, converged, diverged, messages) def linear_solve(self, method: str, lin,", "# residual = y - self.linear(lin, x); function_evaluations += 1", "copy(self, tensor, only_mutable=False): raise NotImplementedError() def call(self, f: Callable, *args,", "can be set globally using `set_global_default_backend()` and locally using `with", "see `default_backend()`. Returns: Tensor belonging to `backend`. \"\"\" backend =", "or None = None) -> List[ComputeDevice]: \"\"\" Fetches information about", "the same device with different objects. \"\"\" def __repr__(self): mem", "and to consider when determining the common data type Returns:", "`value`. Returns: Convolution result as tensor of shape (batch_size, out_channel,", "Determines the behavior of this function if no backend can", "(rank=5). Higher dimensions may not be supported. Args: value: tensor", "overrides the global setting, see `set_global_precision()`. Args: floating_point_bits: 16 for", "subclass `Backend` and register it by adding it to `BACKENDS`.", "grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates a regular", "shape: Returns: \"\"\" raise NotImplementedError(self) def coordinates(self, tensor): \"\"\" Returns", "raise NotImplementedError(self) def ifft(self, k): \"\"\" Computes the n-dimensional inverse", "`default_backend()`. 
raise_error: Determines the behavior of this function if no", "in bits, [1:] from 'with' blocks def choose_backend(*values, prefer_default=False) ->", "return dim2 if dim2 is None or dim2 == 1:", "on `Tensor` objects to delegate the actual computations. Args: *values:", "str = 'batch'): if dim1 is None and dim2 is", "internal conversion is required by backend methods. An object is", "int64. (Default value = True) Returns: tensor representation of `x`", "if the value of the tensor is not known at", "tensor multiples: tuple or list of integers Returns: tile tensor", "finished = [False] * batch_size all_finished = False trajectories =", "shape (batch_size or 1, update_count, index_vector) values: Values to scatter", "`List[SolveResult]`. Returns: result: `SolveResult` or `List[SolveResult]`, depending on `trj`. \"\"\"", "\"\"\" Selects a suitable backend to handle the given values.", "if this backend supports the given feature. Features correspond to", "list, persistent=False): raise NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self) def", "def float_type(self) -> DType: return DType(float, self.precision) @property def as_registered(self)", "max_iter, trj) elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0,", "self.sum(residual ** 2, -1, keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual", "0: raise NoBackendFound(f\"No backend found for types {[type(v).__name__ for v", "Returns: Whether the feature is supported. \"\"\" feature = feature", "= [self.cast(t, result_type) for t in tensors] return tensors def", "of this backend. If x is a native tensor of", "belonging to any registered backend. backend: Target backend. 
If `None`,", "if len(descr) > 30: descr = descr[:28] + \"...\" return", "str, default_device: ComputeDevice): \"\"\" Backends delegate low-level operations to a", "raise NotImplementedError(self) def flatten(self, x): return self.reshape(x, (-1,)) def std(self,", "= tuple(slice(None, None, -1 if i in axes else None)", "trj else None finished = converged | diverged | (iterations", "b def div(self, numerator, denominator): numerator, denominator = self.auto_cast(numerator, denominator)", "*Warning*: This operation breaks the automatic differentiation chain. Args: tensor:", "Signature matches to `Backend.linear_solve()`. \"\"\" # Based on \"An Introduction", "or list, persistent=False): raise NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self)", "must match `spatial_dims`. The first grid point of dimension i", "128)))) def batched_gather_nd(self, values, indices): \"\"\" Gathers values from the", "dimension must match `spatial_dims`. The first grid point of dimension", "\"\"\" Multiply-sum-reduce a_axes of a with b_axes of b. \"\"\"", "slice_idx in range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx +", "where the size of multi_index is values.rank - 2. Returns:", "`backend`. \"\"\" backend = backend or default_backend() current_backend = choose_backend(tensor,", "features. Args: indices: tuple/list matching the dimensions (pair for matrix)", "block_until_ready(self, values): pass def jit_compile(self, f: Callable) -> Callable: return", "the tensor `values` at locations `indices`. The first dimension of", "import numpy from ._dtype import DType, combine_types SolveResult = namedtuple('SolveResult',", "this backend. If x is a native tensor of this", "a native tensor of this backend, it is returned without", "or list): \"\"\" Multiply-sum-reduce a_axes of a with b_axes of", "/ rsq0 > 100, axis=(1,)) & (iterations >= 8) converged", "'reflect')`. 
Returns: sampled values with linear interpolation \"\"\" return NotImplemented", "[last_point] * (max_trajectory_length - len(t) + 1) for t, last_point", "result: `SolveResult` or `List[SolveResult]`, depending on `trj`. \"\"\" if method", "xs: tuple or list, grad_y) -> tuple: raise NotImplementedError(self) def", "dividend, divisor = self.auto_cast(dividend, divisor) return dividend % divisor def", "a valid feature: '{feature}'\") backend_fun = getattr(Backend, feature) impl_fun =", "tensors to the backend precision but leave non-float tensors untouched,", "= Barrier(batch_size + 1) f_output_available = Barrier(batch_size + 1) finished", "len(tensor.shape) if axis >= len(tensor.shape) or axis < 0: raise", "(0, 1) # 0=success messages[b] = res.message finished[b] = True", "the compute device. CPUs are typically called `'CPU'`. \"\"\" self.device_type:", "operate natively on `tensor`, returns `tensor`. If both backends support", "raise NotImplementedError(self) def tensordot(self, a, a_axes: tuple or list, b,", "= None): raise NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self) def", "Other low-level helper functions def combined_dim(dim1, dim2, type_str: str =", "Callable) -> bool: \"\"\" Tests if this backend supports the", "Python number that is understood by this backend, this method", "x, axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self, a, b): raise", "x) it_counter = 0 iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations", "the common data type Returns: tensors cast to a common", "return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations,", "or Callable) -> bool: \"\"\" Tests if this backend supports", "NotImplementedError(self) def random_normal(self, shape): \"\"\" Float tensor of selected precision", "(x, state) iterations[b] += 1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b]", "function_evaluations, converged, 
diverged, \"\")] if trj else None finished =", "import namedtuple from contextlib import contextmanager from threading import Barrier", "else \"memory: n/a\" pro = f\"{self.processor_count} processors\" if self.processor_count >", "= self.auto_cast(a, b) return a & b def or_(self, a,", "of the tensor is not known at this point, e.g.", "if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d", "= self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 = self.sum(residual **", "Filter out non-applicable --- backends = [backend for backend in", "max([len(t) for t in trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b],", "operations involving the tensors as input. This method is called", "this backend \"\"\" raise NotImplementedError() def as_tensor(self, x, convert_external=True): \"\"\"", "0=success messages[b] = res.message finished[b] = True while not all_finished:", "name self._default_device = default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type,", "bool: \"\"\" Tests if this backend supports the given feature.", "= Barrier(batch_size + 1) finished = [False] * batch_size all_finished", "* batch_size diverged = [False] * batch_size messages = [\"\"]", "self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy) dy =", "x = self.copy(x) iterations = self.copy(iterations) continue_ = ~converged &", "residual, iterations, function_evaluations, converged, diverged, \"\")] if trj else None", "device that can be allocated (in bytes). 
-1 for n/a.", "= namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged',", "self.matmul(lin, vector) def gradients(self, y, xs: tuple or list, grad_y)", "value of the tensor is known and can be read", "size (batch,) trj: Whether to record and return the optimization", "dy = self.linear(lin, dx); function_evaluations += continue_1 diverged = self.any(residual_squared", "raise NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self) def exp(self, x):", "NotImplementedError(self) def minimize(self, method: str, f, x0, atol, max_iter, trj:", "SolveResult or List[SolveResult]: \"\"\" Conjugate gradient algorithm with adaptive step", "tensors by this backend but are not native and thus,", "= [batches] return tensor[batches, ...] def unstack(self, tensor, axis=0, keepdims=False)", "self.backend: 'Backend' = backend \"\"\" Backend that this device belongs", "else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice utility", "y) return x >= y def add(self, a, b): a,", "None: # first evaluation final_losses[b] = f_b_losses[b] if trajectories is", "x): raise NotImplementedError(self) def tan(self, x): raise NotImplementedError(self) def log(self,", "`Backend` by adding it to the list. \"\"\" _DEFAULT =", "trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, \"\")) return f_b_losses_np[b],", "for value in values: if not backend.is_tensor(value, only_native=False): return False", "states]) iterations = [state.iterations for state in states] function_evaluations =", "scatter_update or scatter_add. 
Args: base_grid: Tensor into which scatter values", "\"\"\" return self.cast(x, self.float_type) def to_int32(self, x): return self.cast(x, DType(int,", "if trj else None finished = converged | diverged |", "m, v in zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert", "b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait()", "tensor convert_external: if False and `x` is a Python number", "`tuple` of tensor holding the coordinate vectors, i.e. (row, col)", "= self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory", "of base_grid with values at `indices` updated by `values`. \"\"\"", "; active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj else", "NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates): raise", "tan(self, x): raise NotImplementedError(self) def log(self, x): \"\"\" Natural logarithm", "y): \"\"\" Element-wise equality check \"\"\" raise NotImplementedError(self) def not_equal(self,", "def pow(self, base, exp): base, exp = self.auto_cast(base, exp) return", "`None` \"\"\" return _DEFAULT[-1] if len(_DEFAULT) > 1 else None", "default_device: ComputeDevice): \"\"\" Backends delegate low-level operations to a compute", "physical device that can be selected to perform backend computations.", "exit correctly if trj: max_trajectory_length = max([len(t) for t in", "('update', 'add') Returns: Copy of base_grid with values at `indices`", "= devices[0] self._default_device = device def seed(self, seed: int): raise", "atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Standard", "update_count, index_vector) values: Values to scatter at indices. Tensor of", "when the backend operates in eager mode. 
Args: tensor: backend-compatible", "for backend in BACKENDS: if self.name in backend.name: return backend", "device of this type is available.\" device = devices[0] self._default_device", "`values` in shape and data type. values: Initial values of", "`('auto', 'CG', 'CG-adaptive')`. lin: Linear operation. One of * sparse/dense", "nonzero locations. * linear function A(x), must be called on", "[1:] from 'with' blocks _PRECISION = [32] # [0] =", "def linear(self, lin, vector): if callable(lin): return lin(vector) elif isinstance(lin,", "y - self.linear(lin, x); function_evaluations += 1 else: residual =", "value, axes: tuple or list): slices = tuple(slice(None, None, -1", "device_type: str, memory: int, processor_count: int, description: str, ref=None): self.name:", "'Backend', name: str, device_type: str, memory: int, processor_count: int, description:", "list_devices(self, device_type: str or None = None) -> List[ComputeDevice]: \"\"\"", "pad_width: 2D tensor specifying the number of values padded to", "fg = self.functional_gradient(f, [0], get_output=True) method_description = f\"SciPy {method} with", "OptimizeResult, minimize from threading import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0))", "batch_size diverged = [False] * batch_size messages = [\"\"] *", "represented as a NumPy array. Args: tensor: backend-compatible tensor Returns:", "operators. Backends can override this method to prevent unnecessary casting.", "sampled from a normal distribution with mean 0 and std", "entries equal to `values` in shape and data type. values:", "auto_cast(self, *tensors) -> list: \"\"\" Determins the appropriate values type", "for t in trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b],", "# ; active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj", "precision. See Also: `Backend.precision()`. 
If `x` is mutable and of", "Tensor of floating grid indices. The last dimension must match", "`tensor`, returns `tensor`. If both backends support *DLPack* and `use_dlpack=True`,", "NotImplementedError(self) def flip(self, value, axes: tuple or list): slices =", "1) finished = [False] * batch_size all_finished = False trajectories", "keepdims=False): raise NotImplementedError(self) def min(self, x, axis=None, keepdims=False): raise NotImplementedError(self)", "self.name def __repr__(self): return self.name def list_devices(self, device_type: str or", "shape (batch_size, in_channel, spatial...) kernel: tensor of shape (batch_size or", "Returns: the selected `Backend` \"\"\" # --- Default Backend has", "backend is preferred by `choose_backend()`. The default backend can be", "\"\"\" Solve the system of linear equations A · x", "tolerance of size (batch,) atol: Absolute tolerance of size (batch,)", "match `spatial_dims`. The first grid point of dimension i lies", "= [] for slice_idx in range(tensor.shape[axis]): if keepdims: component =", "otherwise) by a backend if it is not a struct", "added to `value` as outer dimensions. Args: value: tensor multiples:", "matrices for varying matrices along batch, must have the same", "str = name \"\"\" Name of the compute device. CPUs", "Args: name: Human-readable string default_device: `ComputeDevice` being used by default", "or float64. Operations may also convert floating point values to", "range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx + 1) if", "`default_backend()`. Returns: Tensor belonging to `backend`. \"\"\" backend = backend", "correct floating type, returns a copy of `x`. 
To convert", "== 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)", "def std(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x,", "(batch_size, spatial..., channels) indices: Tensor of shape (batch_size or 1,", "divisor): dividend, divisor = self.auto_cast(dividend, divisor) return dividend % divisor", "residual, iterations, function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_) it_counter +=", "to a common data type \"\"\" dtypes = [self.dtype(t) for", "call(self, f: Callable, *args, name=None): \"\"\" Calls `f(*args)` and returns", "is available.\" device = devices[0] self._default_device = device def seed(self,", "a custom gradient for backprop. Args: f: Forward function. gradient:", "raise NotImplementedError(self) def pad(self, value, pad_width, mode: str = 'constant',", "all but the first and last dimensions. Args: x: tensor", "for b in range(batch_size): def b_thread(b=b): recent_b_losses = [] def", "def loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged,", "be overridden using `with backend:`. See `default_backend()`, `choose_backend()`. 
Args: backend:", "x): return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128)))) def", "raise NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self) def cast(self, x,", "** 2, -1, keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual *", "A, b): raise NotImplementedError(self) def einsum(self, equation, *tensors): raise NotImplementedError(self)", "processors\" if self.processor_count > 0 else \"processors: n/a\" descr =", "return _DEFAULT[-1] if len(_DEFAULT) > 1 else None def set_global_default_backend(backend:", "None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, \"\")) res", "base_grid: Tensor into which scatter values are inserted at indices.", "coordinates and values of a tensor. Args: tensor: Sparse tensor", "If true, `numpy(tensor)` must return a valid NumPy representation of", "the coordinate vectors, i.e. (row, col) for matrices. indices: Tensor", "it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged _,", "the documentation says (x, state) iterations[b] += 1 loss =", "str = 'constant', constant_values=0): \"\"\" Pad a tensor with values", "dx diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) &", "device def seed(self, seed: int): raise NotImplementedError() def is_tensor(self, x,", "max_iter: Maximum number of iterations of size (batch,) trj: Whether", "'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. Returns: sampled values with linear", "int): raise NotImplementedError() def is_tensor(self, x, only_native=False): \"\"\" An object", "Args: backend: `Backend` to set as default \"\"\" assert isinstance(backend,", "such as driver version. 
\"\"\" self.ref = ref \"\"\" (Optional)", "x): return self.cast(x, DType(int, 32)) def to_int64(self, x): return self.cast(x,", "as tensor of shape (nnz, vector) \"\"\" raise NotImplementedError(self) def", "from phi.math.backend import BACKENDS for backend in BACKENDS: if self.name", "matches to `Backend.linear_solve()`. \"\"\" # Based on the variant described", "about the device such as driver version. \"\"\" self.ref =", "natively on `tensor`, returns `tensor`. If both backends support *DLPack*", "this backend but are not native and thus, will be", "non-zero multi-indices as tensor of shape (nnz, vector) \"\"\" raise", "dy, residual, iterations, function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_) it_counter", "Global list of all registered backends. Register a `Backend` by", "return backend return backends[0] class NoBackendFound(Exception): \"\"\" Thrown by `choose_backend`", "x, _, _, residual, iterations, function_evaluations, converged, diverged =\\ self.while_loop(loop,", "self.functional_gradient(f, [0], get_output=True) method_description = f\"SciPy {method} with {self.name}\" iterations", "values, axis=0): raise NotImplementedError(self) def concat(self, values, axis): raise NotImplementedError(self)", "since Python bool # residual = y - self.linear(lin, x);", "callback=callback) assert isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b] = res.x", "feature if isinstance(feature, str) else feature.__name__ if not hasattr(Backend, feature):", "value with kernel. 
Depending on the tensor rank, the convolution", "isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b] = res.x converged[b] =", "backend in backends: if _is_specific(backend, values): return backend return backends[0]", "else None continue_ = ~converged & ~diverged & (iterations <", "documentation says (x, state) iterations[b] += 1 loss = min(recent_b_losses)", "\"\"\" raise NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self) def scatter(self,", "self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size", "-> ComputeDevice: return self._default_device def set_default_device(self, device: ComputeDevice or str):", "of ('update', 'add') Returns: Copy of base_grid with values at", "x0 dx = residual = y - self.linear(lin, x) dy", "spatial_dims: Dimension indices that correspond to coordinate vectors coordinates: Tensor", "single, 64 for double \"\"\" _PRECISION.append(floating_point_bits) try: yield None finally:", "(iterations < max_iter) def loop(continue_, it_counter, x, dx, dy, residual,", "lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape) == 2 return self.stack([self.matmul(m,", "'{method}' not supported for linear solve.\") def conjugate_gradient(self, lin, y,", "in BACKENDS: if self.name in backend.name: return backend raise RuntimeError(f\"Backend", "raise NotImplementedError(self) def min(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def", "xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], \"\") for b in", "function_evaluations = [state.function_evaluations for state in states] converged = [state.converged", "diverged _, _, x, _, _, residual, iterations, function_evaluations, converged,", "_DEFAULT[-1] # --- Filter out non-applicable --- backends = [backend", "to any registered backend. backend: Target backend. If `None`, uses", "is supported. 
\"\"\" feature = feature if isinstance(feature, str) else", "axis the number of times given by multiples. If `multiples`", "Calls `f(*args)` and returns the result. This method may be", "axis=None, keepdims=False): raise NotImplementedError(self) def prod(self, value, axis=None): raise NotImplementedError(self)", "as `value`. Returns: Convolution result as tensor of shape (batch_size,", "This method is called by the default implementations of basic", "tensor representations, not Python numbers or others that are also", "= self.stack([state.residual for state in states]) iterations = [state.iterations for", "x): raise NotImplementedError(self) def sin(self, x): raise NotImplementedError(self) def cos(self,", "DType(int, 32)) def to_int64(self, x): return self.cast(x, DType(int, 64)) def", "tensor): return len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array)) def batch_gather(self,", "denominator) return numerator / denominator def pow(self, base, exp): base,", "default, [1:] from 'with' blocks _PRECISION = [32] # [0]", "messages) def linear_solve(self, method: str, lin, y, x0, rtol, atol,", "import DType, combine_types SolveResult = namedtuple('SolveResult', [ 'method', 'x', 'residual',", "matrices along batch, must have the same nonzero locations. *", "mode. Args: tensor: backend-compatible tensor Returns: bool \"\"\" raise NotImplementedError()", "if it_counter % 50 == 0: # Not traceable since", "+= not_finished_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size", "x >= y def add(self, a, b): a, b =", "Loop variables upon loop completion. 
\"\"\" raise NotImplementedError(self) def abs(self,", "iterations, function_evaluations, converged, diverged _, _, x, _, _, residual,", "def ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self)", "\"\"\" raise NotImplementedError() def is_available(self, tensor) -> bool: \"\"\" Tests", "def unstack(self, tensor, axis=0, keepdims=False) -> tuple: if axis <", "True while not all_finished: f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread)", "any(values[0]): values = loop(*values) return values ``` This operation does", "self.name: str = name \"\"\" Name of the compute device.", "affects all registered backends. If `floating_point_bits` is an integer, all", "NotImplementedError() def numpy(self, tensor) -> numpy.ndarray: \"\"\" Returns a NumPy", "pads the edges of `value` with zeros so that the", "prod(self, value, axis=None): raise NotImplementedError(self) def divide_no_nan(self, x, y): \"\"\"", "the target backend can operate natively on `tensor`, returns `tensor`.", "sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def prod(self, value, axis=None):", "batched_gather_nd(self, values, indices): \"\"\" Gathers values from the tensor `values`", "if y=0. Args: x: y: Returns: \"\"\" raise NotImplementedError(self) def", "tensor along each axis the number of times given by", "2 # (batch, parameters) batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f,", "b_thread.start() while True: f_input_available.wait() if all(finished): all_finished = True f_output_available.wait()", "A · x = y. 
This method need not provide", "contextmanager from threading import Barrier from typing import List, Callable", "devices[0] self._default_device = device def seed(self, seed: int): raise NotImplementedError()", "backend but are not native and thus, will be converted", "of dimension i lies at position 0, the last at", "'symmetric', 'reflect')`. Returns: sampled values with linear interpolation \"\"\" return", "def equal(self, x, y): \"\"\" Element-wise equality check \"\"\" raise", "at this point. If true, `numpy(tensor)` must return a valid", "converged, diverged, \"\")] if trj else None finished = converged", "\"Methods of Conjugate Gradients for Solving Linear Systems\" by <NAME>", "a comprehensive list of available operations. To support a compute", "See Also: `Backend.precision()`. If `x` is mutable and of the", "continue_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size =", "a, b = self.auto_cast(a, b) return a * b def", "and component axes. mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values:", "handle the given values. This function is used by most", "`Backend.precision()`. If `x` is mutable and of the correct floating", "Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`. lin:", "iterations, function_evaluations, converged, diverged, messages)) return trajectory else: x =", "values)): return _DEFAULT[-1] # --- Filter out non-applicable --- backends", "in (0, 1) # 0=success messages[b] = res.message finished[b] =", "x, residual, iterations, function_evaluations, converged, diverged, \"\") def linear(self, lin,", "and last dimensions. Args: k: tensor of dimension 3 or", "values as specified by `mode` and `constant_values`. If the mode", "* tuple/list of sparse/dense matrices for varying matrices along batch,", "available compute devices this backend can use. 
Implementations: * NumPy:", "32)) def to_int64(self, x): return self.cast(x, DType(int, 64)) def to_complex(self,", "def conv(self, value, kernel, zero_padding=True): \"\"\" Convolve value with kernel.", "so that the result has the same shape as `value`.", "shape {lin_shape}\" return self.matmul(lin, vector) def gradients(self, y, xs: tuple", "1. \"\"\" raise NotImplementedError(self) def stack(self, values, axis=0): raise NotImplementedError(self)", "flatten(self, x): return self.reshape(x, (-1,)) def std(self, x, axis=None, keepdims=False):", "def name(self) -> str: return self._name def supports(self, feature: str", "= ~converged & ~diverged & (iterations < max_iter) def loop(continue_,", "intermediately converts `tensor` to a NumPy array. *Warning*: This operation", "Returns: bool: whether `x` is considered a tensor by this", "import contextmanager from threading import Barrier from typing import List,", "Callable: \"\"\" Creates a function based on `f` that uses", "# Not traceable since Python bool # residual = y", "the result has the same shape as `value`. Returns: Convolution", "input had a different precision. Returns: 16 for half, 32", "that correspond to coordinate vectors coordinates: Tensor of floating grid", "= self.auto_cast(dividend, divisor) return dividend % divisor def and_(self, a,", "equal(self, x, y): \"\"\" Element-wise equality check \"\"\" raise NotImplementedError(self)", "= y. This method need not provide a gradient for", "of device such as `'CPU'`, `'GPU'` or `'TPU'`. \"\"\" self.memory:", "\"\"\" Fetches information about all available compute devices this backend", "'{self}' is not visible.\") @property def complex_type(self) -> DType: return", "`values` and `indices` is the batch dimension which must be", "updated by `values`. 
\"\"\" raise NotImplementedError(self) def any(self, boolean_tensor, axis=None,", "NotImplementedError(self) def custom_gradient(self, f: Callable, gradient: Callable) -> Callable: \"\"\"", "Computes the n-dimensional inverse FFT along all but the first", "d == axis else slice(None) for d in range(len(tensor.shape))])] else:", "v in values]}; registered backends are {BACKENDS}\") # --- Native", "convergence residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx =", "raise NotImplementedError(self) def imag(self, x): raise NotImplementedError(self) def real(self, x):", "diverged, \"\")) x = self.copy(x) iterations = self.copy(iterations) continue_ =", "backends. Register a `Backend` by adding it to the list.", "residual, iterations, function_evaluations, converged, diverged, \"\") def conjugate_gradient_adaptive(self, lin, y,", "[[] for _ in range(batch_size)] if trj else None threads", "tensors created henceforth will be of the corresponding data type,", "`spatial_dims`. The first grid point of dimension i lies at", "is known and can be read at this point. If", "common data type \"\"\" dtypes = [self.dtype(t) for t in", "raise NoBackendFound(f\"No backend found for types {[type(v).__name__ for v in", "compute device. CPUs are typically called `'CPU'`. \"\"\" self.device_type: str", "shape as `value`. Returns: Convolution result as tensor of shape", "False return True def _is_specific(backend, values): for value in values:", "the grid. One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`.", "tensor \"\"\" raise NotImplementedError(self) def sparse_tensor(self, indices, values, shape): \"\"\"", "_converged, _diverged): continue_1 = self.to_int32(continue_) it_counter += 1 iterations +=", "backend can use. 
Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties)", "raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\"", "mem = f\"{(self.memory / 1024 ** 2)} MB\" if self.memory", "Returns: tensor representation of `x` \"\"\" raise NotImplementedError() def is_available(self,", "if dim2 is None or dim2 == 1: return dim1", "the Agonizing Pain\" by <NAME> # symbols: dx=d, dy=q, step_size=alpha,", "are inserted at indices. Tensor of shape (batch_size, spatial..., channels)", "-> Backend or None: \"\"\" Returns the backend set by", "descr = descr[:28] + \"...\" return f\"'{self.name}' ({self.device_type}) | {mem}", "continue_ = ~converged & ~diverged & (iterations < max_iter) def", "return _DEFAULT[-1] # --- Filter out non-applicable --- backends =", "To convert float tensors to the backend precision but leave", "converged, diverged, \"\")] if trj else None continue_ = ~converged", "shape (batch, spatial..., channel) indices: int tensor of shape (batch,", "int, processor_count: int, description: str, ref=None): self.name: str = name", "value, axis=None, keepdims=False): raise NotImplementedError(self) def prod(self, value, axis=None): raise", "assert dim1 == dim2, f\"Incompatible {type_str} dimensions: x0 {dim1}, y", "to register internal calls with the profiler. Usage: choose_backend(key).call(custom_function, *args)", "`tensor` to a NumPy array. *Warning*: This operation breaks the", "The last dimension must match `spatial_dims`. 
The first grid point", "a tensor-like object to the native tensor representation of this", "= self.sum(residual ** 2, -1, keepdims=True) dx = residual +", "all methods of the backend accept it as a tensor", "convert_external=True): \"\"\" Converts a tensor-like object to the native tensor", "[[axis 0 lower, axis 0 upper], ...] including batch and", "FFT along all but the first and last dimensions. Args:", "Maximum memory of the device that can be allocated (in", "values with linear interpolation \"\"\" return NotImplemented def variable(self, value):", "Operations may also convert floating point values to this precision,", "function. gradient: Function for backprop. Will be called as `gradient(*d_out)`", "the backend accept it as a tensor argument. Args: x:", "raise NotImplementedError(self) def minimize(self, method: str, f, x0, atol, max_iter,", "messages)) return trajectory else: x = self.stack(xs) residual = self.stack(final_losses)", "trajectory else: x = self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description,", "raise NotImplementedError(self) def nonzero(self, values): \"\"\" Args: values: Tensor with", "may not be supported. Args: value: tensor of shape (batch_size,", "=\\ self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations,", "tensor axis: Axis index >= 0 \"\"\" raise NotImplementedError(self) def", "padded to the edges of each axis in the form", "0 iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int,", "Returns: \"\"\" raise NotImplementedError(self) def imag(self, x): raise NotImplementedError(self) def", "values.shape[i]-1. extrapolation: Values to use for coordinates outside the grid.", "`set_global_precision()`. Args: floating_point_bits: 16 for half, 32 for single, 64", "backend is current_backend: return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and", "last dimensions. 
Args: k: tensor of dimension 3 or higher", "backend.is_tensor(tensor, True) or backend is current_backend: return tensor if use_dlpack", "[`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional)", "NotImplementedError(self) def concat(self, values, axis): raise NotImplementedError(self) def pad(self, value,", "default backend assuming it can handle handle the values, see", "point values with precision equal to the currently set default", "batch_size function_evaluations = [0] * batch_size xs = [None] *", "[32] # [0] = global precision in bits, [1:] from", "ComputeDevice or str): if isinstance(device, str): devices = self.list_devices(device) assert", "`Tensor` objects to delegate the actual computations. Args: *values: prefer_default:", "NoBackendFound(f\"No backend found for types {[type(v).__name__ for v in values]};", "float Returns: Values of `x` as float tensor \"\"\" return", "non-float tensors untouched, use `Backend.as_tensor()`. Args: x: tensor of bool,", "`choose_backend()`. The default backend can be set globally using `set_global_default_backend()`", "block. If called outside a backend context, returns `None`. Returns:", "or `None` \"\"\" return _DEFAULT[-1] if len(_DEFAULT) > 1 else", "for single, 64 for double \"\"\" _PRECISION.append(floating_point_bits) try: yield None", "the edges of `value` with zeros so that the result", "y. 
This method need not provide a gradient for the", "rsq0 > 100, axis=(1,)) & (iterations >= 8) converged =", "def set_global_precision(floating_point_bits: int): \"\"\" Sets the floating point precision of", "size(self, array): return self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if isinstance(batches,", "SolveResult = namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged',", "if backend.is_tensor(tensor, True) or backend is current_backend: return tensor if", "ref=None): self.name: str = name \"\"\" Name of the compute", "trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Solve the system", "def block_until_ready(self, values): pass def jit_compile(self, f: Callable) -> Callable:", "backend raise RuntimeError(f\"Backend '{self}' is not visible.\") @property def complex_type(self)", "0 if y=0. Args: x: y: Returns: \"\"\" raise NotImplementedError(self)", "or list of integers Returns: tile tensor \"\"\" raise NotImplementedError(self)", "for the operation. Args: method: Which algorithm to use. One", "stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple, coordinates,", "representation of this backend. If x is a native tensor", "def is_available(self, tensor) -> bool: \"\"\" Tests if the value", "precision(self) -> int: \"\"\" Short for math.backend.get_precision() \"\"\" return get_precision()", "f_b_losses_np[b], f_grad_np[b] def callback(x, *args): # L-BFGS-B only passes x", "\"\"\" feature = feature if isinstance(feature, str) else feature.__name__ if", "int: \"\"\" Short for math.backend.get_precision() \"\"\" return get_precision() @property def", "input. 
This method is called by the default implementations of", "dimension which must be either equal for both or one", "utility functions def _is_applicable(backend, values): for value in values: if", "axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged,", "processor_count: int, description: str, ref=None): self.name: str = name \"\"\"", "residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx = residual", "dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared,", "self.float_type) def to_int32(self, x): return self.cast(x, DType(int, 32)) def to_int64(self,", "iterations = [state.iterations for state in states] function_evaluations = [state.function_evaluations", "'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ]) class", "must be implemented if the feature is supported. Possible features:", "dimensions. Args: value: tensor multiples: tuple or list of integers", "return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) elif method", "position 0, the last at values.shape[i]-1. extrapolation: Values to use", "b) return a ^ b def floordiv(self, a, b): a,", "Callable, gradient: Callable) -> Callable: \"\"\" Creates a function based", "return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int): \"\"\" Sets the floating", "def round(self, x): raise NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self)", "from scipy.optimize import OptimizeResult, minimize from threading import Thread assert", "2, 128)))) def batched_gather_nd(self, values, indices): \"\"\" Gathers values from", "`value` as outer dimensions. Args: value: tensor multiples: tuple or", "NotImplementedError(self) def sparse_tensor(self, indices, values, shape): \"\"\" Optional features. Args:", "`Backend.as_tensor()`. 
Args: x: tensor of bool, int or float Returns:", "< 0: raise ValueError(\"Illegal axis value\") result = [] for", "'periodic', 'symmetric', 'reflect')`. Returns: sampled values with linear interpolation \"\"\"", "True return False # Other low-level helper functions def combined_dim(dim1,", "n/a. \"\"\" self.description: str = description \"\"\" Further information about", "to check if the value can be represented as a", "bool) -> SolveResult or List[SolveResult]: \"\"\" Solve the system of", "residual = self.stack(final_losses) return SolveResult(method_description, x, residual, iterations, function_evaluations, converged,", "keepdims=True) dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged", "self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy = self.sum(dx * dy,", "divisor) return dividend % divisor def and_(self, a, b): a,", "help prevent type clashes like int32 vs int64. (Default value", "cast to a common data type \"\"\" dtypes = [self.dtype(t)", "used to register internal calls with the profiler. Usage: choose_backend(key).call(custom_function,", "- self.linear(lin, x) it_counter = 0 iterations = self.zeros([batch_size], DType(int,", "raise NotImplementedError(self) def round(self, x): raise NotImplementedError(self) def ceil(self, x):", "this function if no backend can handle the given values.", "lin_shape = self.staticshape(lin) assert len(lin_shape) == 2, f\"A must be", "operation breaks the automatic differentiation chain. Args: tensor: Native tensor", "= global default, [1:] from 'with' blocks _PRECISION = [32]", "imag(self, x): raise NotImplementedError(self) def real(self, x): raise NotImplementedError(self) def", "set globally using `set_global_default_backend()` and locally using `with backend:`. 
Returns:", "DType(int, 32)) residual_squared = rsq0 = self.sum(residual ** 2, -1,", "f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is", "or dim2 == 1: return dim1 assert dim1 == dim2,", "the default implementations of basic operators. Backends can override this", "raise NotImplementedError() def call(self, f: Callable, *args, name=None): \"\"\" Calls", "def prod(self, value, axis=None): raise NotImplementedError(self) def divide_no_nan(self, x, y):", "must be called on all instances in parallel y: target", "__repr__(self): mem = f\"{(self.memory / 1024 ** 2)} MB\" if", "for n/a. \"\"\" self.processor_count: int = processor_count \"\"\" Number of", "= self.copy(iterations) continue_ = ~converged & ~diverged & (iterations <", "'add') Returns: Copy of base_grid with values at `indices` updated", "axis else slice(None) for d in range(len(tensor.shape))])] else: component =", "Backends delegate low-level operations to a compute library or emulate", "different objects. 
\"\"\" def __repr__(self): mem = f\"{(self.memory / 1024", "return self.cast(x, DType(int, 32)) def to_int64(self, x): return self.cast(x, DType(int,", "x, only_native=False): \"\"\" An object is considered a native tensor", "or higher Returns: \"\"\" raise NotImplementedError(self) def imag(self, x): raise", "`tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only devices", "DType, combine_types SolveResult = namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations',", "mode: str): \"\"\" Depending on `mode`, performs scatter_update or scatter_add.", "= loss if trajectories is not None: trajectories[b].append(SolveResult(method_description, x, loss,", "prefer_default=False) -> Backend: \"\"\" Selects a suitable backend to handle", "max(64, min(self.precision * 2, 128)))) def batched_gather_nd(self, values, indices): \"\"\"", "ones(self, shape, dtype: DType = None): raise NotImplementedError(self) def ones_like(self,", "to a method of this backend that must be implemented", "a NumPy array, it is returned without modification. This method", "A physical device that can be selected to perform backend", "at indices. Tensor of shape (batch_size or 1, update_count or", "coordinates(self, tensor): \"\"\" Returns the coordinates and values of a", "nonzero(self, values): \"\"\" Args: values: Tensor with only spatial dimensions", "in range(batch_size): def b_thread(b=b): recent_b_losses = [] def b_fun(x: numpy.ndarray):", "-1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <=", "function_evaluations, converged, diverged, \"\")] if trj else None continue_ =", "given backend as default. 
This setting can be overridden using", "tuple, Python number, tensor convert_external: if False and `x` is", "tile tensor \"\"\" raise NotImplementedError(self) def sparse_tensor(self, indices, values, shape):", "else: lin_shape = self.staticshape(lin) assert len(lin_shape) == 2, f\"A must", "default. This setting can be overridden using `with backend:`. See", "indices: tuple/list matching the dimensions (pair for matrix) values: param", "return _DEFAULT[-1] def context_backend() -> Backend or None: \"\"\" Returns", "signature and return values as `f`. However, the returned function", "tensor of shape (batch, spatial..., channel) indices: int tensor of", "raises an error if the value of the tensor is", "(rank=3), 2D (rank=4) or 3D (rank=5). Higher dimensions may not", "considered a tensor (nativer or otherwise) by a backend if", "`f`. Returns: Function with similar signature and return values as", "tuple or list, grad_y) -> tuple: raise NotImplementedError(self) def record_gradients(self,", "{method} with {self.name}\" iterations = [0] * batch_size function_evaluations =", "'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) else:", "Linear Systems\" by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method =", "(in bytes). -1 for n/a. \"\"\" self.processor_count: int = processor_count", "prevent type clashes like int32 vs int64. (Default value =", "Sparse tensor Returns: coordinates: `tuple` of tensor holding the coordinate", "x); function_evaluations += 1 # else: residual = residual -", "= description \"\"\" Further information about the device such as", "priority --- if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)):", "return f_b_losses_np[b], f_grad_np[b] def callback(x, *args): # L-BFGS-B only passes", "2nd order tensor (batch, vector) or list of vectors. x0:", "precision. 
Returns: 16 for half, 32 for single, 64 for", "f_b_losses_np = None f_grad_np = None f_input_available = Barrier(batch_size +", "keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self)", "result. This method may be used to register internal calls", "return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128)))) def batched_gather_nd(self,", "[] # [0] = global default, [1:] from 'with' blocks", "return dividend % divisor def and_(self, a, b): a, b", "instance), `convert_numbers` decides whether to convert it unless the backend", "False and `x` is a Python number that is understood", "raise NotImplementedError(self) def stack(self, values, axis=0): raise NotImplementedError(self) def concat(self,", "\"\"\" assert isinstance(backend, Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits: int):", "dimension 3 or higher Returns: \"\"\" raise NotImplementedError(self) def imag(self,", "first grid point of dimension i lies at position 0,", "(e.g. tuple, list) and all methods of the backend accept", "~converged & ~diverged & (iterations < max_iter) def loop(continue_, it_counter,", "None: \"\"\" Returns the backend set by the inner-most surrounding", "\"\"\" Short for math.backend.get_precision() \"\"\" return get_precision() @property def float_type(self)", "to `values` in shape and data type. values: Initial values", "to the currently set default precision. See Also: `Backend.precision()`. If", "\"\"\" self.backend: 'Backend' = backend \"\"\" Backend that this device", "list: \"\"\" Determins the appropriate values type resulting from operations", "-> str: return self._name def supports(self, feature: str or Callable)", "Tensor of shape (batch_size or 1, update_count or 1, channels", "NotImplementedError(self) def log10(self, x): raise NotImplementedError(self) def dtype(self, array) ->", "inserted at indices. 
Tensor of shape (batch_size, spatial..., channels) indices:", "in the range [0, 1) \"\"\" raise NotImplementedError(self) def random_normal(self,", "expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self) def shape(self, tensor): raise", "1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b]", "to prevent unnecessary casting. Args: *tensors: tensors to cast and", "0, 1, False, False, \"\")) return f_b_losses_np[b], f_grad_np[b] def callback(x,", "max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))", "tuple/list of sparse/dense matrices for varying matrices along batch, must", "= residual = y - self.linear(lin, x) dy = self.linear(lin,", "eager mode. Args: tensor: backend-compatible tensor Returns: bool \"\"\" raise", "on `mode`, performs scatter_update or scatter_add. Args: base_grid: Tensor into", "** exp def mod(self, dividend, divisor): dividend, divisor = self.auto_cast(dividend,", "@contextmanager def precision(floating_point_bits: int): \"\"\" Sets the floating point precision", "1) if d == axis else slice(None) for d in", "tensor belonging to any registered backend. backend: Target backend. If", "Backend = None, use_dlpack=True): \"\"\" Convert a Tensor to the", "Args: *values: prefer_default: if True, selects the default backend assuming", "is either 1D (rank=3), 2D (rank=4) or 3D (rank=5). Higher", "representation of the values stored in the tensor \"\"\" raise", "the convolution is either 1D (rank=3), 2D (rank=4) or 3D", "= 'batch'): if dim1 is None and dim2 is None:", "+= step_size * dx # if it_counter % 50 ==", "Args: tensor: Sparse tensor Returns: coordinates: `tuple` of tensor holding", "these dimensions are added to `value` as outer dimensions. 
Args:", "else None def set_global_default_backend(backend: Backend): \"\"\" Sets the given backend", "tuple or list, get_output: bool): raise NotImplementedError(self) def custom_gradient(self, f:", "x, minimum, maximum): raise NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self)", "tensor): \"\"\" Returns the coordinates and values of a tensor.", "is None: # first evaluation final_losses[b] = f_b_losses[b] if trajectories", "* NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` *", "tensor is not known at this point, e.g. because it", "untouched, use `Backend.as_tensor()`. Args: x: tensor of bool, int or", "dx_dy) dy = self.linear(lin, dx); function_evaluations += continue_1 diverged =", "SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\") def linear(self,", "self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert len(lin_shape) == 2, f\"A", "a, b): a, b = self.auto_cast(a, b) return a +", "def ones(self, shape, dtype: DType = None): raise NotImplementedError(self) def", "_PRECISION.pop(-1) def convert(tensor, backend: Backend = None, use_dlpack=True): \"\"\" Convert", "class ComputeDevice: \"\"\" A physical device that can be selected", "assert len(lin_shape) == 2, f\"A must be a matrix but", "being used by default \"\"\" self._name = name self._default_device =", "and last dimensions. Args: x: tensor of dimension 3 or", "(optional) Return only devices of this type, e.g. 
`'GPU'` or", "self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size],", "= current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray)", "math.backend.get_precision() \"\"\" return get_precision() @property def float_type(self) -> DType: return", "return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend", "one for either. Args: values: tensor of shape (batch, spatial...,", "grad_y) -> tuple: raise NotImplementedError(self) def record_gradients(self, xs: tuple or", "dimension of `values` and `indices` is the batch dimension which", "trj else None threads = [] for b in range(batch_size):", "floating point precision for the local context. Usage: `with precision(p):`", "def stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple,", "atol ** 2) x = x0 dx = residual =", "def all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def fft(self, x):", "backend operates in eager mode. Args: tensor: backend-compatible tensor Returns:", "None f_input_available = Barrier(batch_size + 1) f_output_available = Barrier(batch_size +", "trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False,", "not provide a gradient for the operation. Args: method: Which", "conv(self, value, kernel, zero_padding=True): \"\"\" Convolve value with kernel. 
Depending", "the tensor \"\"\" raise NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError()", "any..., multi_index) where the size of multi_index is values.rank -", "grid: Tensor spatial_dims: Dimension indices that correspond to coordinate vectors", "\"\"\" def __init__(self, msg): Exception.__init__(self, msg) def default_backend() -> Backend:", "trj: max_trajectory_length = max([len(t) for t in trajectories]) last_points =", "0 else \"memory: n/a\" pro = f\"{self.processor_count} processors\" if self.processor_count", "Args: x: tensor with any number of dimensions mask: 1D", "tensor): raise NotImplementedError(self) def cast(self, x, dtype: DType): raise NotImplementedError(self)", "b): raise NotImplementedError(self) def clip(self, x, minimum, maximum): raise NotImplementedError(self)", "point. If true, `numpy(tensor)` must return a valid NumPy representation", "currently set default precision. See Also: `Backend.precision()`. If `x` is", "f: Callable) -> Callable: return NotImplemented def functional_gradient(self, f, wrt:", "_DEFAULT[-1] def context_backend() -> Backend or None: \"\"\" Returns the", "NotImplementedError(self) def tan(self, x): raise NotImplementedError(self) def log(self, x): \"\"\"", "If x is a native tensor of this backend, it", "Args: base_grid: Tensor into which scatter values are inserted at", "= self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq", "Gets the current target floating point precision in bits. The", "However, the returned function does not support keyword arguments. \"\"\"", "be called as `gradient(*d_out)` to compute the gradient of `f`.", "to convert it unless the backend cannot handle Python numbers.", "x): raise NotImplementedError(self) def sign(self, x): raise NotImplementedError(self) def round(self,", "x/y but returns 0 if y=0. 
Args: x: y: Returns:", "cos(self, x): raise NotImplementedError(self) def tan(self, x): raise NotImplementedError(self) def", "but the first and last dimensions. Args: k: tensor of", "tensor argument. Args: x: object to check only_native: If True,", "\"\"\" self.device_type: str = device_type \"\"\" Type of device such", "number as-is. This can help prevent type clashes like int32", "= f_b_losses[b] if trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b],", "x = self.stack([self.to_float(state.x) for state in states]) residual = self.stack([state.residual", "f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: # first evaluation final_losses[b]", "100, axis=(1,)) & (iterations >= 8) converged = self.all(residual_squared <=", "a Python number (numbers.Number instance), `convert_numbers` decides whether to convert", "NotImplementedError(self) def sin(self, x): raise NotImplementedError(self) def cos(self, x): raise", "self.linear(lin, x); function_evaluations += 1 # else: residual = residual", "coordinate vectors coordinates: Tensor of floating grid indices. The last", "Args: floating_point_bits: 16 for half, 32 for single, 64 for", "self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2", "can help prevent type clashes like int32 vs int64. 
(Default", "range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)): raise", "tensors (Default value = False) Returns: bool: whether `x` is", "x): raise NotImplementedError(self) def exp(self, x): raise NotImplementedError(self) def conv(self,", "record_gradients(self, xs: tuple or list, persistent=False): raise NotImplementedError(self) def stop_gradient(self,", "denominator def pow(self, base, exp): base, exp = self.auto_cast(base, exp)", "2, -1), atol ** 2) x = x0 dx =", "axis < 0: axis += len(tensor.shape) if axis >= len(tensor.shape)", "used by most math functions operating on `Tensor` objects to", "new tensors will default to float32 unless specified otherwise. The", "context. Usage: `with precision(p):` This overrides the global setting, see", "return self.cast(x, DType(int, 64)) def to_complex(self, x): return self.cast(x, DType(complex,", "in threads: b_thread.join() # make sure threads exit correctly if", "default_backend() current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or backend", "returns `tensor`. If both backends support *DLPack* and `use_dlpack=True`, uses", "\"\"\" Backends delegate low-level operations to a compute library or", "\"\"\" raise NotImplementedError(self) def where(self, condition, x=None, y=None): raise NotImplementedError(self)", "by <NAME> # symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b", "tuple or list, b, b_axes: tuple or list): \"\"\" Multiply-sum-reduce", "tensors cast to a common data type \"\"\" dtypes =", "tensor representation of this backend. If x is a native", "for backprop. Args: f: Forward function. gradient: Function for backprop.", "1024 ** 2)} MB\" if self.memory > 0 else \"memory:", "constant_values=0): \"\"\" Pad a tensor with values as specified by", "(batch_size, out_channel, spatial...) 
\"\"\" raise NotImplementedError(self) def expand_dims(self, a, axis=0,", "-1, keepdims=True) dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx", "len(lin_shape) == 2, f\"A must be a matrix but got", "This function is used by most math functions operating on", "a backend context, returns `None`. Returns: `Backend` or `None` \"\"\"", "logarithm \"\"\" raise NotImplementedError(self) def log2(self, x): raise NotImplementedError(self) def", "number of iterations of size (batch,) trj: Whether to record", "iterations += continue_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)", "__init__(self, name: str, default_device: ComputeDevice): \"\"\" Backends delegate low-level operations", "CG ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size", "\"\")) x = self.copy(x) iterations = self.copy(iterations) continue_ = ~converged", "returned without modification. If x is a Python number (numbers.Number", "backend cannot handle Python numbers. *Note:* There may be objects", "containing random values in the range [0, 1) \"\"\" raise", "it unless the backend cannot handle Python numbers. *Note:* There", "indices. The last dimension must match `spatial_dims`. The first grid", "values at `indices` updated by `values`. \"\"\" raise NotImplementedError(self) def", "32 for single, 64 for double \"\"\" _PRECISION.append(floating_point_bits) try: yield", "= global precision in bits, [1:] from 'with' blocks def", "[1:] from 'with' blocks def choose_backend(*values, prefer_default=False) -> Backend: \"\"\"", "all floating point tensors created henceforth will be of the", "*args, name=None): \"\"\" Calls `f(*args)` and returns the result. 
This", "def combined_dim(dim1, dim2, type_str: str = 'batch'): if dim1 is", "on all instances in parallel y: target result of A", "clip(self, x, minimum, maximum): raise NotImplementedError(self) def sqrt(self, x): raise", "3 or higher Returns: \"\"\" raise NotImplementedError(self) def ifft(self, k):", "handle the values, see `default_backend()`. raise_error: Determines the behavior of", "for Solving Linear Systems\" by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf", "the local context. Usage: `with precision(p):` This overrides the global", "default_device: `ComputeDevice` being used by default \"\"\" self._name = name", "isinstance(batches, int): batches = [batches] return tensor[batches, ...] def unstack(self,", "in backends: if _is_specific(backend, values): return backend return backends[0] class", "method raises an error if the value of the tensor", "Forward function. gradient: Function for backprop. Will be called as", "traceable since Python bool # residual = y - self.linear(lin,", "x=None, y=None): raise NotImplementedError(self) def nonzero(self, values): \"\"\" Args: values:", "Higher dimensions may not be supported. Args: value: tensor of", "raise_error: Determines the behavior of this function if no backend", "return DType(float, self.precision) @property def as_registered(self) -> 'Backend': from phi.math.backend", "mask, axis=0): \"\"\" Args: x: tensor with any number of", "of `x`. To convert float tensors to the backend precision", "returns `None`. Returns: `Backend` or `None` \"\"\" return _DEFAULT[-1] if", "sparse_tensor(self, indices, values, shape): \"\"\" Optional features. Args: indices: tuple/list", "the native tensor representation of this backend. If x is", "raise NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self, grid,", "the native format of `backend`. 
If the target backend can", "residual, iterations, function_evaluations, converged, diverged, messages)) return trajectory else: x", "delegate low-level operations to a compute library or emulate them.", "Natural logarithm \"\"\" raise NotImplementedError(self) def log2(self, x): raise NotImplementedError(self)", "list, get_output: bool): raise NotImplementedError(self) def custom_gradient(self, f: Callable, gradient:", "^ b def floordiv(self, a, b): a, b = self.auto_cast(a,", "of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns:", "dy = self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy = self.sum(dx", "batch_size messages = [\"\"] * batch_size f_inputs = [None] *", "namedtuple from contextlib import contextmanager from threading import Barrier from", "NotImplementedError(self) def record_gradients(self, xs: tuple or list, persistent=False): raise NotImplementedError(self)", "the given values. This function is used by most math", "automatic differentiation chain. Args: tensor: Native tensor belonging to any", "def ndims(self, tensor): return len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array))", "numerator / denominator def pow(self, base, exp): base, exp =", "if backend.is_tensor(value, only_native=True): return True return False # Other low-level", "return False return True def _is_specific(backend, values): for value in", "values as `f`. However, the returned function does not support", "algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`. lin: Linear", "diverged = [False] * batch_size messages = [\"\"] * batch_size", "be allocated (in bytes). -1 for n/a. 
\"\"\" self.processor_count: int", "= self.description.replace('\\n', ' ') if len(descr) > 30: descr =", "continue_1 diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) &", "pad(self, value, pad_width, mode: str = 'constant', constant_values=0): \"\"\" Pad", "def minimize(self, method: str, f, x0, atol, max_iter, trj: bool):", "convert float tensors to the backend precision but leave non-float", "as a `List[SolveResult]`. Returns: result: `SolveResult` or `List[SolveResult]`, depending on", "-> Backend: \"\"\" Selects a suitable backend to handle the", "actual computations. Args: *values: prefer_default: if True, selects the default", "' ') if len(descr) > 30: descr = descr[:28] +", "stop, number): raise NotImplementedError(self) def tensordot(self, a, a_axes: tuple or", "jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) #", "= backend or default_backend() current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor,", "method = f\"Φ-Flow CG-adaptive ({self.name})\" y = self.to_float(y) x0 =", "not support keyword arguments. \"\"\" return NotImplemented def jit_compile_grad(self, f,", "Returns: Loop variables upon loop completion. \"\"\" raise NotImplementedError(self) def", "Args: value: tensor pad_width: 2D tensor specifying the number of", "DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) ->", "batch_size final_losses = [None] * batch_size converged = [False] *", "with adaptive step size. Signature matches to `Backend.linear_solve()`. \"\"\" #", "function_evaluations, converged, diverged, \"\") def linear(self, lin, vector): if callable(lin):", "\"\"\" raise NotImplementedError(self) def expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self)", "context, returns `None`. 
Returns: `Backend` or `None` \"\"\" return _DEFAULT[-1]", "for half, 32 for single, 64 for double \"\"\" _PRECISION.append(floating_point_bits)", "of `backend`. If the target backend can operate natively on", "# ; active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter +=", "raise NotImplementedError() def transpose(self, tensor, axes): raise NotImplementedError() def random_uniform(self,", "for state in states]) iterations = [state.iterations for state in", "def and_(self, a, b): a, b = self.auto_cast(a, b) return", "x): raise NotImplementedError(self) def floor(self, x): raise NotImplementedError(self) def max(self,", "may also convert floating point values to this precision, even", "does not support keyword arguments. \"\"\" return NotImplemented def jit_compile_grad(self,", "complex, bool): tensors = [self.cast(t, result_type) for t in tensors]", "the mode is not supported, returns NotImplemented. Args: value: tensor", "edges of `value` with zeros so that the result has", "= [] for states in zip(*trajectories): x = self.stack([self.to_float(state.x) for", "fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread", "raise RuntimeError(f\"Backend '{self}' is not visible.\") @property def complex_type(self) ->", "residual=r, y=b method = f\"Φ-Flow CG ({self.name})\" y = self.to_float(y)", "# in-place subtraction affects convergence residual_squared_old = residual_squared residual_squared =", "library or emulate them. The methods of `Backend` form a", "| {mem} | {pro} | {descr}\" class Backend: def __init__(self,", "dtype: DType = None): raise NotImplementedError(self) def zeros_like(self, tensor): raise", "* dx if it_counter % 50 == 0: residual =", "compute library or emulate them. 
The methods of `Backend` form", "self.linear(lin, x); function_evaluations += 1 else: residual = residual -", "None, new tensors will default to float32 unless specified otherwise.", "for backend in BACKENDS if _is_applicable(backend, values)] if len(backends) ==", "def flip(self, value, axes: tuple or list): slices = tuple(slice(None,", "and dim2 is None: return None if dim1 is None", "dim1 is None and dim2 is None: return None if", "`tensor` is already a NumPy array, it is returned without", "mean 0 and std 1. \"\"\" raise NotImplementedError(self) def stack(self,", "*args) \"\"\" return f(*args) def block_until_ready(self, values): pass def jit_compile(self,", "(int, float, complex, bool): tensors = [self.cast(t, result_type) for t", "self.copy(x) iterations = self.copy(iterations) finished = converged | diverged |", "selects the default backend assuming it can handle handle the", "Returns: \"\"\" raise NotImplementedError(self) def coordinates(self, tensor): \"\"\" Returns the", "to use for coordinates outside the grid. One of `('undefined',", "method may convert floating point values to this precision, even", "with any number of dimensions mask: 1D mask tensor axis:", "<NAME> # symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method", "by a backend if no internal conversion is required by", "dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) *", "y): x, y = self.auto_cast(x, y) return x >= y", "x): raise NotImplementedError(self) def real(self, x): raise NotImplementedError(self) def sin(self,", "for coordinates outside the grid. One of `('undefined', 'zeros', 'boundary',", "native tensor representations, not Python numbers or others that are", "Barrier(batch_size + 1) f_output_available = Barrier(batch_size + 1) finished =", "raise NotImplementedError(self) def record_gradients(self, xs: tuple or list, persistent=False): raise", "as input. 
This method is called by the default implementations", "Type of device such as `'CPU'`, `'GPU'` or `'TPU'`. \"\"\"", "solve.\") def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj:", "because no device of this type is available.\" device =", "(nativer or otherwise) by a backend if it is not", "b def floordiv(self, a, b): a, b = self.auto_cast(a, b)", "values): for value in values: if backend.is_tensor(value, only_native=True): return True", "Callable import numpy from ._dtype import DType, combine_types SolveResult =", "using `with precision(p):`. Any Backend method may convert floating point", "the currently set default precision. See Also: `Backend.precision()`. If `x`", "== 0: raise NoBackendFound(f\"No backend found for types {[type(v).__name__ for", "indices: int tensor of shape (batch, any..., multi_index) where the", "of `value` with zeros so that the result has the", "_is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # --- Filter out non-applicable ---", "& ~diverged & (iterations < max_iter) return continue_, it_counter, x,", "method_description = f\"SciPy {method} with {self.name}\" iterations = [0] *", "Linear operation. One of * sparse/dense matrix valid for all", "= None) -> List[ComputeDevice]: \"\"\" Fetches information about all available", "residual = self.stack([state.residual for state in states]) iterations = [state.iterations", "casting. Args: *tensors: tensors to cast and to consider when", "return a + b def sub(self, a, b): a, b", "limit=None, delta=1, dtype: DType = DType(int, 32)): raise NotImplementedError(self) def", "the n-dimensional FFT along all but the first and last", "returned without modification. This method raises an error if the", "affects convergence residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx", "loop completion. 
\"\"\" raise NotImplementedError(self) def abs(self, x): raise NotImplementedError(self)", "`indices` is the batch dimension which must be either equal", "device_type: (optional) Return only devices of this type, e.g. `'GPU'`", "log10(self, x): raise NotImplementedError(self) def dtype(self, array) -> DType: raise", "tensor Returns: coordinates: `tuple` of tensor holding the coordinate vectors,", "but leave non-float tensors untouched, use `Backend.as_tensor()`. Args: x: tensor", "batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 * self.sum(y", "function_evaluations += 1 # else: residual = residual - step_size", "Args: x: object to check only_native: If True, only accepts", "y, xs: tuple or list, grad_y) -> tuple: raise NotImplementedError(self)", "(16, 32, 64, None) \"\"\" _PRECISION[0] = floating_point_bits def get_precision()", "the correct floating type, returns a copy of `x`. To", "\"\"\" _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1) def convert(tensor, backend:", "`with precision(p):` This overrides the global setting, see `set_global_precision()`. Args:", "self.linear(lin, x) dy = self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int,", "is preferred by `choose_backend()`. 
The default backend can be set", "\"\"\" Element-wise equality check \"\"\" raise NotImplementedError(self) def not_equal(self, x,", "`floating_point_bits` is an integer, all floating point tensors created henceforth", "each axis in the form [[axis 0 lower, axis 0", "def any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor,", "\"\") def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj:", "def set_default_device(self, device: ComputeDevice or str): if isinstance(device, str): devices", "= self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory", "a normal distribution with mean 0 and std 1. \"\"\"", "NotImplementedError(self) def matmul(self, A, b): raise NotImplementedError(self) def einsum(self, equation,", "*tensors: tensors to cast and to consider when determining the", "not native and thus, will be converted by this method.", "returns 0 if y=0. Args: x: y: Returns: \"\"\" raise", "ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self) def", "<= tolerance_sq, axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method, x,", "if no backend can handle the given values. If True,", "by this method. Args: x: tensor-like, e.g. 
list, tuple, Python", "min(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self, a, b):", "functional_gradient(self, f, wrt: tuple or list, get_output: bool): raise NotImplementedError(self)", "threading import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 #", "= [state.function_evaluations for state in states] converged = [state.converged for", "= self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared", "\"\"\" raise NotImplementedError(self) def abs(self, x): raise NotImplementedError(self) def sign(self,", "size (batch,) atol: Absolute tolerance of size (batch,) max_iter: Maximum", "ValueError(f\"Not a valid feature: '{feature}'\") backend_fun = getattr(Backend, feature) impl_fun", "\"\"\" Returns the coordinates and values of a tensor. Args:", "variables. Returns: Loop variables upon loop completion. \"\"\" raise NotImplementedError(self)", "if it is not a struct (e.g. tuple, list) and", "Barrier(batch_size + 1) finished = [False] * batch_size all_finished =", "Name of the compute device. CPUs are typically called `'CPU'`.", "converged, diverged =\\ self.while_loop(loop, (continue_, 0, x, dx, dy, residual,", "[0, 1) \"\"\" raise NotImplementedError(self) def random_normal(self, shape): \"\"\" Float", "default \"\"\" self._name = name self._default_device = default_device def __enter__(self):", "raise ValueError(\"Illegal axis value\") result = [] for slice_idx in", "b = self.auto_cast(a, b) return a | b def xor(self,", "represent the same device with different objects. \"\"\" def __repr__(self):", "value in values: if not backend.is_tensor(value, only_native=False): return False return", "y: target result of A * x. 2nd order tensor", "of floating grid indices. 
The last dimension must match `spatial_dims`.", "constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used for out-of-bounds points", "(iterations >= 8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if", "multiprocessors. -1 for n/a. \"\"\" self.description: str = description \"\"\"", "and all methods of the backend accept it as a", "understood by this backend, this method returns the number as-is.", "dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True),", "raise NotImplementedError(self) def tile(self, value, multiples): \"\"\" Repeats the tensor", "\"\") def linear(self, lin, vector): if callable(lin): return lin(vector) elif", "x) dy = self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int, 32))", "def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):", "tensors = [self.cast(t, result_type) for t in tensors] return tensors", "isfinite(self, x): raise NotImplementedError(self) def scatter(self, base_grid, indices, values, mode:", "# first evaluation final_losses[b] = f_b_losses[b] if trajectories is not", "x, dx, dy, residual, iterations, function_evaluations, converged, diverged)) return trajectory", "available.\" device = devices[0] self._default_device = device def seed(self, seed:", "use_dlpack=True): \"\"\" Convert a Tensor to the native format of", "\"\"\" raise NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self,", "dividend, divisor): dividend, divisor = self.auto_cast(dividend, divisor) return dividend %", "backend return backends[0] class NoBackendFound(Exception): \"\"\" Thrown by `choose_backend` if", "= name \"\"\" Name of the compute device. 
CPUs are", "selected precision containing random values in the range [0, 1)", "axis=None, keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False): raise", "double \"\"\" _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1) def convert(tensor,", "Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. \"\"\" #", "Multiply-sum-reduce a_axes of a with b_axes of b. \"\"\" raise", "NotImplementedError(self) def to_float(self, x): \"\"\" Converts a tensor to floating", "f(*args) def block_until_ready(self, values): pass def jit_compile(self, f: Callable) ->", "trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged,", "backend context, returns `None`. Returns: `Backend` or `None` \"\"\" return", "'CG': return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj) elif", "dtype: DType): raise NotImplementedError(self) def to_float(self, x): \"\"\" Converts a", "def min(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self, a,", "int, description: str, ref=None): self.name: str = name \"\"\" Name", "as outer dimensions. 
Args: value: tensor multiples: tuple or list", "Tests if the value of the tensor is known and", "from threading import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2", "in tensors] return tensors def __str__(self): return self.name def __repr__(self):", "1, f\"{self.name}: Cannot select '{device} because no device of this", "trajectories is not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False,", "import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 # (batch,", "\"An Introduction to the Conjugate Gradient Method Without the Agonizing", "self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations,", "def ceil(self, x): raise NotImplementedError(self) def floor(self, x): raise NotImplementedError(self)", "base, exp = self.auto_cast(base, exp) return base ** exp def", "`None`. Returns: the selected `Backend` \"\"\" # --- Default Backend", "in range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx + 1)", "str, ref=None): self.name: str = name \"\"\" Name of the", "(Optional) Reference to the internal device representation. \"\"\" self.backend: 'Backend'", "with entries equal to `values` in shape and data type.", "def b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x, convert_external=True)", "# --- Native tensors? --- for backend in backends: if", "list, grad_y) -> tuple: raise NotImplementedError(self) def record_gradients(self, xs: tuple", "backend accept it as a tensor argument. 
Args: x: object", "False, False, \"\")) res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b],", "self.copy(x) iterations = self.copy(iterations) continue_ = ~converged & ~diverged &", "single, 64 for double \"\"\" return _PRECISION[-1] @contextmanager def precision(floating_point_bits:", "only devices of this type, e.g. `'GPU'` or `'CPU'`. See", "else returns `None`. Returns: the selected `Backend` \"\"\" # ---", "convergence residual_squared_old = residual_squared residual_squared = self.sum(residual ** 2, -1,", "else \"processors: n/a\" descr = self.description.replace('\\n', ' ') if len(descr)", "required by backend methods. An object is considered a tensor", "tensor or NotImplemented \"\"\" raise NotImplementedError(self) def reshape(self, value, shape):", "y): \"\"\" Computes x/y but returns 0 if y=0. Args:", "range(self.ndims(value))) return value[slices] def sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self)", "b_axes of b. \"\"\" raise NotImplementedError(self) def matmul(self, A, b):", "number of dimensions mask: 1D mask tensor axis: Axis index", "for b in range(batch_size)] trajectories = [t[:-1] + [last_point] *", "equation, *tensors): raise NotImplementedError(self) def while_loop(self, loop: Callable, values: tuple):", "+ 1) for t, last_point in zip(trajectories, last_points)] trajectory =", "f, wrt: tuple or list, get_output: bool): raise NotImplementedError() def", "d == axis else slice(None) for d in range(len(tensor.shape))])] result.append(component)", "int): \"\"\" Sets the floating point precision for the local", "def _is_applicable(backend, values): for value in values: if not backend.is_tensor(value,", "raise NotImplementedError(self) def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:", "and register it by adding it to `BACKENDS`. Args: name:", "the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`.", "it by adding it to `BACKENDS`. 
Args: name: Human-readable string", "Float tensor of selected precision containing random values in the", "devices = self.list_devices(device) assert len(devices) >= 1, f\"{self.name}: Cannot select", "dx); function_evaluations += not_finished_1 dx_dy = self.sum(dx * dy, axis=-1,", "# Based on \"An Introduction to the Conjugate Gradient Method", "self.auto_cast(a, b) return a * b def div(self, numerator, denominator):", "or_(self, a, b): a, b = self.auto_cast(a, b) return a", "global precision in bits, [1:] from 'with' blocks def choose_backend(*values,", "`Backend` to set as default \"\"\" assert isinstance(backend, Backend) _DEFAULT[0]", "this backend, it is returned without modification. If x is", "each axis the number of times given by multiples. If", "or None: \"\"\" Returns the backend set by the inner-most", "A(x), must be called on all instances in parallel y:", "Gradient Method Without the Agonizing Pain\" by <NAME> # symbols:", "Whether to record and return the optimization trajectory as a", "isinstance(feature, str) else feature.__name__ if not hasattr(Backend, feature): raise ValueError(f\"Not", "self.cast(x, self.float_type) def to_int32(self, x): return self.cast(x, DType(int, 32)) def", "spatial..., channels) indices: Tensor of shape (batch_size or 1, update_count,", "else: residual = residual - step_size * dy # in-place", "return self._default_device def set_default_device(self, device: ComputeDevice or str): if isinstance(device,", "f, wrt: tuple or list, get_output: bool): raise NotImplementedError(self) def", "floordiv(self, a, b): a, b = self.auto_cast(a, b) return a", "values: param shape: shape: Returns: \"\"\" raise NotImplementedError(self) def coordinates(self,", "boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def fft(self, x): \"\"\" Computes", "raise NotImplementedError(self) def range(self, start, limit=None, delta=1, dtype: DType =", "= [SolveResult(method_description, xs[b], final_losses[b], iterations[b], 
function_evaluations[b], converged[b], diverged[b], \"\") for", "len(t) + 1) for t, last_point in zip(trajectories, last_points)] trajectory", "b = self.auto_cast(a, b) return a + b def sub(self,", "residual, iterations, function_evaluations, converged, diverged, messages) def linear_solve(self, method: str,", "Will be called as `gradient(*d_out)` to compute the gradient of", "int): batches = [batches] return tensor[batches, ...] def unstack(self, tensor,", "2)} MB\" if self.memory > 0 else \"memory: n/a\" pro", "by a backend if it is not a struct (e.g.", "return tensor[batches, ...] def unstack(self, tensor, axis=0, keepdims=False) -> tuple:", "f: Callable, gradient: Callable) -> Callable: \"\"\" Creates a function", "a compute library or emulate them. The methods of `Backend`", "res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback)", "32)) residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)", "internal calls with the profiler. Usage: choose_backend(key).call(custom_function, *args) \"\"\" return", "instances * tuple/list of sparse/dense matrices for varying matrices along", "the result. This method may be used to register internal", "L-BFGS-B only passes x but the documentation says (x, state)", "axes): raise NotImplementedError() def random_uniform(self, shape): \"\"\" Float tensor of", "'reflect' constant_values: used for out-of-bounds points if mode='constant' (Default value", "name: str, device_type: str, memory: int, processor_count: int, description: str,", "(batch,) trj: Whether to record and return the optimization trajectory", "if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return", "% divisor def and_(self, a, b): a, b = self.auto_cast(a,", "feature: str or Callable) -> bool: \"\"\" Tests if this", "the feature is supported. 
\"\"\" feature = feature if isinstance(feature,", "of values padded to the edges of each axis in", "be either equal for both or one for either. Args:", "native tensor by a backend if no internal conversion is", "`constant_values`. If the mode is not supported, returns NotImplemented. Args:", "\"\"\" Converts a tensor to floating point values with precision", "need not provide a gradient for the operation. Args: method:", "__repr__(self): return self.name def list_devices(self, device_type: str or None =", "pow(self, base, exp): base, exp = self.auto_cast(base, exp) return base", "= [None] * batch_size final_losses = [None] * batch_size converged", "hasattr(Backend, feature): raise ValueError(f\"Not a valid feature: '{feature}'\") backend_fun =", "None f_b_losses_np = None f_grad_np = None f_input_available = Barrier(batch_size", "outside a backend context, returns `None`. Returns: `Backend` or `None`", "boolean_mask(self, x, mask, axis=0): \"\"\" Args: x: tensor with any", "+= step_size * dx if it_counter % 50 == 0:", "y def greater_or_equal(self, x, y): x, y = self.auto_cast(x, y)", "feature: '{feature}'\") backend_fun = getattr(Backend, feature) impl_fun = getattr(self.__class__, feature)", "-> Callable: \"\"\" Creates a function based on `f` that", "xs = [None] * batch_size final_losses = [None] * batch_size", "backend in BACKENDS if _is_applicable(backend, values)] if len(backends) == 0:", "NotImplemented def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool):", "def to_float(self, x): \"\"\" Converts a tensor to floating point", "max_trajectory_length = max([len(t) for t in trajectories]) last_points = [SolveResult(method_description,", "BACKENDS: if self.name in backend.name: return backend raise RuntimeError(f\"Backend '{self}'", "all but the first and last dimensions. Args: k: tensor", "converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished)", "default backend is preferred by `choose_backend()`. 
The default backend can", "self.auto_cast(numerator, denominator) return numerator / denominator def pow(self, base, exp):", "numpy(self, tensor) -> numpy.ndarray: \"\"\" Returns a NumPy representation of", "sqrt(self, x): raise NotImplementedError(self) def exp(self, x): raise NotImplementedError(self) def", "else: x = self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description, x,", "is None, new tensors will default to float32 unless specified", "If the target backend can operate natively on `tensor`, returns", "raise NotImplementedError() def get_default_device(self) -> ComputeDevice: return self._default_device def set_default_device(self,", "__init__(self, msg): Exception.__init__(self, msg) def default_backend() -> Backend: \"\"\" The", "= backend \"\"\" Backend that this device belongs to. Different", "f_b_losses[b] if trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0,", "~self.all(finished): it_counter += 1; iterations += not_finished_1 dy = self.linear(lin,", "convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5).", "of shape (batch_size or 1, update_count, index_vector) values: Values to", "backend \"\"\" raise NotImplementedError() def as_tensor(self, x, convert_external=True): \"\"\" Converts", "1 else None def set_global_default_backend(backend: Backend): \"\"\" Sets the given", "= True f_output_available.wait() break _, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np", "== axis else slice(None) for d in range(len(tensor.shape))])] else: component", "== axis else slice(None) for d in range(len(tensor.shape))])] result.append(component) return", "can operate natively on `tensor`, returns `tensor`. If both backends", "from operations involving the tensors as input. This method is", "\"\")) return f_b_losses_np[b], f_grad_np[b] def callback(x, *args): # L-BFGS-B only", "a, b = self.auto_cast(a, b) return a - b def", "method. Args: x: tensor-like, e.g. 
list, tuple, Python number, tensor", "upper], ...] including batch and component axes. mode: constant', 'boundary',", "\"\"\" Backend that this device belongs to. Different backends represent", "* dy # in-place subtraction affects convergence residual_squared = self.sum(residual", "type, returns a copy of `x`. To convert float tensors", "+= len(tensor.shape) if axis >= len(tensor.shape) or axis < 0:", "Returns: `list` of all currently available devices. \"\"\" raise NotImplementedError()", "dx if it_counter % 50 == 0: residual = y", "dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged =", "dy = self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations", "are {BACKENDS}\") # --- Native tensors? --- for backend in", "processor_count \"\"\" Number of CPU cores or GPU multiprocessors. -1", "it_counter = 0 iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations =", "the backend set by the inner-most surrounding `with backend:` block.", "NotImplementedError(self) def log(self, x): \"\"\" Natural logarithm \"\"\" raise NotImplementedError(self)", "return DType(complex, max(64, self.precision)) def combine_types(self, *dtypes: DType) -> DType:", "+= 1 # else: residual = residual - step_size *", "raise NotImplementedError() def is_available(self, tensor) -> bool: \"\"\" Tests if", "msg): Exception.__init__(self, msg) def default_backend() -> Backend: \"\"\" The default", "if trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1,", "\"\"\" Sets the floating point precision of DYNAMIC_BACKEND which affects", "= choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or backend is current_backend:", "threads = [] for b in range(batch_size): def b_thread(b=b): recent_b_losses", "raise NotImplementedError(self) def where(self, condition, x=None, y=None): raise NotImplementedError(self) def", "zero_padding: If 
True, pads the edges of `value` with zeros", "Returns: Values of `x` as float tensor \"\"\" return self.cast(x,", "indices. Tensor of shape (batch_size or 1, update_count or 1,", "phi.math.backend import BACKENDS for backend in BACKENDS: if self.name in", "`'GPU'` or `'TPU'`. \"\"\" self.memory: int = memory \"\"\" Maximum", "common data type Returns: tensors cast to a common data", "multi_index) where the size of multi_index is values.rank - 2.", "import Barrier from typing import List, Callable import numpy from", "arguments. \"\"\" return NotImplemented def jit_compile_grad(self, f, wrt: tuple or", "\"\"\" Sets the given backend as default. This setting can", "Backend choice utility functions def _is_applicable(backend, values): for value in", "tensor rank, the convolution is either 1D (rank=3), 2D (rank=4)", "self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads: b_thread.join()", "-> bool: raise NotImplementedError() @property def precision(self) -> int: \"\"\"", "component = tensor[tuple([slice_idx if d == axis else slice(None) for", "backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the", "The default backend is preferred by `choose_backend()`. The default backend", "math functions operating on `Tensor` objects to delegate the actual", "NoBackendFound(Exception): \"\"\" Thrown by `choose_backend` if no backend can handle", "def complex_type(self) -> DType: return DType(complex, max(64, self.precision)) def combine_types(self,", "Tensor into which scatter values are inserted at indices. Tensor", "sub(self, a, b): a, b = self.auto_cast(a, b) return a", "states] converged = [state.converged for state in states] diverged =", "NumPy representation of the given tensor. 
If `tensor` is already", "indices: Tensor holding the corresponding values \"\"\" raise NotImplementedError(self) def", "Args: device_type: (optional) Return only devices of this type, e.g.", "diverged[b] = res.status not in (0, 1) # 0=success messages[b]", "None): raise NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self,", "the same shape as `value`. Returns: Convolution result as tensor", "of A * x. 2nd order tensor (batch, vector) or", "None finished = converged | diverged | (iterations >= max_iter);", "spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates a regular grid at", "raise NotImplementedError(self) def reshape(self, value, shape): raise NotImplementedError(self) def flip(self,", "Initial guess of size (batch, parameters) rtol: Relative tolerance of", "along all but the first and last dimensions. Args: x:", "tuple or list of integers Returns: tile tensor \"\"\" raise", "and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else:", "Backend method may convert floating point values to this precision,", "coordinates: Tensor of floating grid indices. The last dimension must", "be selected to perform backend computations. \"\"\" def __init__(self, backend:", "'function_evaluations', 'converged', 'diverged', 'message', ]) class ComputeDevice: \"\"\" A physical", "read at this point. If true, `numpy(tensor)` must return a", "True, pads the edges of `value` with zeros so that", "= None f_grad_np = None f_input_available = Barrier(batch_size + 1)", "batch, must have the same nonzero locations. * linear function", "of `f`. Returns: Function with similar signature and return values", "`'TPU'`. 
\"\"\" self.memory: int = memory \"\"\" Maximum memory of", "not Python numbers or others that are also supported as", "\"\"\" Args: values: Tensor with only spatial dimensions Returns: non-zero", "backend.is_tensor(value, only_native=True): return True return False # Other low-level helper", "to_int32(self, x): return self.cast(x, DType(int, 32)) def to_int64(self, x): return", "values in the range [0, 1) \"\"\" raise NotImplementedError(self) def", "max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Solve the", "If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion", "from 'with' blocks def choose_backend(*values, prefer_default=False) -> Backend: \"\"\" Selects", "** 2, -1), atol ** 2) x = x0 dx", "--- Filter out non-applicable --- backends = [backend for backend", "NotImplementedError(self) def divide_no_nan(self, x, y): \"\"\" Computes x/y but returns", "uses a custom gradient for backprop. Args: f: Forward function.", "`convert_numbers` decides whether to convert it unless the backend cannot", "Usage: choose_backend(key).call(custom_function, *args) \"\"\" return f(*args) def block_until_ready(self, values): pass", "final_losses[b] = f_b_losses[b] if trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b],", "may be used to register internal calls with the profiler.", "Based on the variant described in \"Methods of Conjugate Gradients", "tensor Returns: NumPy representation of the values stored in the", "max_iter, trj) else: raise NotImplementedError(f\"Method '{method}' not supported for linear", "[t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for", "rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\"", "keepdims=False): raise NotImplementedError(self) def maximum(self, a, b): raise NotImplementedError(self) def", "tensor of shape (batch_size, in_channel, spatial...) 
kernel: tensor of shape", "axis=0, number=1): raise NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self) def", "is an integer, all floating point tensors created henceforth will", "def to_int32(self, x): return self.cast(x, DType(int, 32)) def to_int64(self, x):", "*dtypes: DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors)", "but the first and last dimensions. Args: x: tensor of", "kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...)", "for linear solve.\") def conjugate_gradient(self, lin, y, x0, rtol, atol,", "for b_thread in threads: b_thread.join() # make sure threads exit", "`choose_backend` if no backend can handle the given values. \"\"\"", "-> int: \"\"\" Gets the current target floating point precision", "converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is not", "range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self, x, y): \"\"\" Element-wise", "(batch, parameters) batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True)", "and returns the result. This method may be used to", "keepdims=False): raise NotImplementedError(self) def range(self, start, limit=None, delta=1, dtype: DType", "loop(*values) return values ``` This operation does not support backpropagation.", "+ \"...\" return f\"'{self.name}' ({self.device_type}) | {mem} | {pro} |", "unless the backend cannot handle Python numbers. 
*Note:* There may", "is not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False,", "if trj else None continue_ = ~converged & ~diverged &", "converged, diverged)) return trajectory if trj else SolveResult(method, x, residual,", "= descr[:28] + \"...\" return f\"'{self.name}' ({self.device_type}) | {mem} |", "integers Returns: tile tensor \"\"\" raise NotImplementedError(self) def sparse_tensor(self, indices,", "converged, diverged, \"\") def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol,", "-> DType: raise NotImplementedError(self) def tile(self, value, multiples): \"\"\" Repeats", "List[SolveResult]: \"\"\" Solve the system of linear equations A ·", "= self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 * self.sum(y **", "tensor of shape (batch, any..., multi_index) where the size of", "with b_axes of b. \"\"\" raise NotImplementedError(self) def matmul(self, A,", "int or float Returns: Values of `x` as float tensor", "max_iter) return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations,", "def to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError()", "axis=(1,)) & (iterations >= 8) converged = self.all(residual_squared <= tolerance_sq,", "not really necessary but ensures batch-independence x += step_size *", "if it_counter % 50 == 0: residual = y -", "equal to `values` in shape and data type. values: Initial", "raise NotImplementedError(self) def clip(self, x, minimum, maximum): raise NotImplementedError(self) def", "# if it_counter % 50 == 0: # Not traceable", "that are also supported as tensors (Default value = False)", "that can be selected to perform backend computations. \"\"\" def", "scatter at indices. 
Tensor of shape (batch_size or 1, update_count", "= tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else", "MB\" if self.memory > 0 else \"memory: n/a\" pro =", "floating grid indices. The last dimension must match `spatial_dims`. The", "current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray", "cannot handle Python numbers. *Note:* There may be objects that", "if dim1 is None or dim1 == 1: return dim2", "the gradient of `f`. Returns: Function with similar signature and", "a struct (e.g. tuple, list) and all methods of the", "Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits: int): \"\"\" Sets the", "NotImplementedError(self) def exp(self, x): raise NotImplementedError(self) def conv(self, value, kernel,", "raise NotImplementedError(self) def fft(self, x): \"\"\" Computes the n-dimensional FFT", "str): devices = self.list_devices(device) assert len(devices) >= 1, f\"{self.name}: Cannot", "len(self.staticshape(x0)) == 2 # (batch, parameters) batch_size = self.staticshape(x0)[0] fg", "def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:", "Optional features. Args: indices: tuple/list matching the dimensions (pair for", "matching the dimensions (pair for matrix) values: param shape: shape:", "try: yield None finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend =", "for all instances * tuple/list of sparse/dense matrices for varying", "both or one for either. Args: values: tensor of shape", "values, axis): raise NotImplementedError(self) def pad(self, value, pad_width, mode: str", "An object is considered a tensor (nativer or otherwise) by", "backend: `Backend` to set as default \"\"\" assert isinstance(backend, Backend)", "b) return a | b def xor(self, a, b): a,", "Native tensors? --- for backend in backends: if _is_specific(backend, values):", "with the profiler. 
Usage: choose_backend(key).call(custom_function, *args) \"\"\" return f(*args) def", "residual_squared_old) * dx diverged = self.any(residual_squared / rsq0 > 100,", "that this device belongs to. Different backends represent the same", "list, tuple, Python number, tensor convert_external: if False and `x`", "all instances in parallel y: target result of A *", "exp): base, exp = self.auto_cast(base, exp) return base ** exp", "suitable backend to handle the given values. This function is", "def scatter(self, base_grid, indices, values, mode: str): \"\"\" Depending on", "(iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active =", "at this point, e.g. because it represents a node in", "of shape (nnz, vector) \"\"\" raise NotImplementedError(self) def mean(self, value,", "a, a_axes: tuple or list, b, b_axes: tuple or list):", "feature. Features correspond to a method of this backend that", "as default \"\"\" assert isinstance(backend, Backend) _DEFAULT[0] = backend def", "values, shape): \"\"\" Optional features. Args: indices: tuple/list matching the", "trajectories = [[] for _ in range(batch_size)] if trj else", "of Conjugate Gradients for Solving Linear Systems\" by <NAME> and", "== dim2, f\"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}\" return", "* (max_trajectory_length - len(t) + 1) for t, last_point in", "\"\"\" Tests if this backend supports the given feature. Features", "not be supported. Args: value: tensor of shape (batch_size, in_channel,", "are also supported as tensors (Default value = False) Returns:", "the inner-most surrounding `with backend:` block. 
If called outside a", "= self.auto_cast(a, b) return a | b def xor(self, a,", "raise NotImplementedError(self) def zeros(self, shape, dtype: DType = None): raise", "self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter += 1; iterations += not_finished_1", "gradients(self, y, xs: tuple or list, grad_y) -> tuple: raise", "tensor of this backend, it is returned without modification. If", "or list, get_output: bool): raise NotImplementedError() def transpose(self, tensor, axes):", "convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: # first", "raise NotImplementedError(self) def real(self, x): raise NotImplementedError(self) def sin(self, x):", "self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged,", "x, y): x, y = self.auto_cast(x, y) return x >=", "2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared", "for double \"\"\" _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1) def", "def variable(self, value): return NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor))", "gradient for backprop. Args: f: Forward function. gradient: Function for", "Python numbers or others that are also supported as tensors", "not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, \"\"))", "active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter += 1; iterations", "operations. 
To support a compute library, subclass `Backend` and register", "@property def float_type(self) -> DType: return DType(float, self.precision) @property def", "string default_device: `ComputeDevice` being used by default \"\"\" self._name =", "is already a NumPy array, it is returned without modification.", "* batch_size converged = [False] * batch_size diverged = [False]", "NotImplementedError(self) def min(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self,", "linear solve.\") def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter,", "return a // b BACKENDS = [] \"\"\" Global list", "| {pro} | {descr}\" class Backend: def __init__(self, name: str,", "this backend supports the given feature. Features correspond to a", "raise NotImplementedError(self) def einsum(self, equation, *tensors): raise NotImplementedError(self) def while_loop(self,", "all available compute devices this backend can use. Implementations: *", "raise NotImplementedError() def as_tensor(self, x, convert_external=True): \"\"\" Converts a tensor-like", "- step_size * dy # in-place subtraction affects convergence residual_squared_old", "= residual_squared residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx", "** 2, -1, keepdims=True) dx = residual + self.divide_no_nan(residual_squared, residual_squared_old)", "to the list. 
\"\"\" _DEFAULT = [] # [0] =", "+ self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged = self.any(residual_squared / rsq0", "axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method,", "raise NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def", "tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged,", "double \"\"\" return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int): \"\"\" Sets", "Conjugate Gradients for Solving Linear Systems\" by <NAME> and <NAME>", "NotImplementedError(self) def max(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def min(self,", "NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self) def exp(self, x): raise", "assert len(devices) >= 1, f\"{self.name}: Cannot select '{device} because no", "for d in range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self, x,", "def jit_compile(self, f: Callable) -> Callable: return NotImplemented def functional_gradient(self,", "returns `None`. Returns: the selected `Backend` \"\"\" # --- Default", "device belongs to. Different backends represent the same device with", "backprop. Args: f: Forward function. gradient: Function for backprop. Will", "'symmetric', 'reflect' constant_values: used for out-of-bounds points if mode='constant' (Default", "of `values` and `indices` is the batch dimension which must", "def add(self, a, b): a, b = self.auto_cast(a, b) return", "slice(None) for d in range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self,", "not known at this point, e.g. 
because it represents a", "n/a\" descr = self.description.replace('\\n', ' ') if len(descr) > 30:", "import List, Callable import numpy from ._dtype import DType, combine_types", "meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self, start, stop, number): raise", "tensor \"\"\" return self.cast(x, self.float_type) def to_int32(self, x): return self.cast(x,", "[0] * batch_size xs = [None] * batch_size final_losses =", "seed: int): raise NotImplementedError() def is_tensor(self, x, only_native=False): \"\"\" An", "adding it to `BACKENDS`. Args: name: Human-readable string default_device: `ComputeDevice`", "BACKENDS if _is_applicable(backend, values)] if len(backends) == 0: raise NoBackendFound(f\"No", "objects. \"\"\" def __repr__(self): mem = f\"{(self.memory / 1024 **", "def precision(self) -> int: \"\"\" Short for math.backend.get_precision() \"\"\" return", "ndims(self, tensor): return len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array)) def", "overridden using `with backend:`. See `default_backend()`, `choose_backend()`. Args: backend: `Backend`", "list of vectors. x0: Initial guess of size (batch, parameters)", "If `floating_point_bits` is an integer, all floating point tensors created", "NotImplementedError(self) def tile(self, value, multiples): \"\"\" Repeats the tensor along", "channels) indices: Tensor of shape (batch_size or 1, update_count, index_vector)", "combine_types(self, *dtypes: DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self,", "def dtype(self, array) -> DType: raise NotImplementedError(self) def tile(self, value,", "*tensors) -> list: \"\"\" Determins the appropriate values type resulting", "of CPU cores or GPU multiprocessors. -1 for n/a. 
\"\"\"", "= None f_input_available = Barrier(batch_size + 1) f_output_available = Barrier(batch_size", "({self.device_type}) | {mem} | {pro} | {descr}\" class Backend: def", "\"\"\" Thrown by `choose_backend` if no backend can handle the", "can be allocated (in bytes). -1 for n/a. \"\"\" self.processor_count:", "axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x, mask, axis=0): \"\"\"", "b_thread in threads: b_thread.join() # make sure threads exit correctly", "else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\") def", "= self.copy(x) iterations = self.copy(iterations) continue_ = ~converged & ~diverged", "f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait()", "NotImplementedError(self) def prod(self, value, axis=None): raise NotImplementedError(self) def divide_no_nan(self, x,", "for state in states] converged = [state.converged for state in", "if isinstance(batches, int): batches = [batches] return tensor[batches, ...] def", "_PRECISION = [32] # [0] = global precision in bits,", "f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if", "= f\"Φ-Flow CG ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0),", "`list` of all currently available devices. \"\"\" raise NotImplementedError() def", "slice(None) for d in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if", "= [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")] if", "rtol: Relative tolerance of size (batch,) atol: Absolute tolerance of", "specified otherwise. The output of math operations has the same", "and data type. values: Initial values of loop variables. 
Returns:", "point of dimension i lies at position 0, the last", "= min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if trajectories is not", "depending on `trj`. \"\"\" if method == 'auto': return self.conjugate_gradient_adaptive(lin,", "linear equations A · x = y. This method need", "return backend raise RuntimeError(f\"Backend '{self}' is not visible.\") @property def", "`default_backend()`, `choose_backend()`. Args: backend: `Backend` to set as default \"\"\"", "in the form [[axis 0 lower, axis 0 upper], ...]", "= [state.iterations for state in states] function_evaluations = [state.function_evaluations for", "return backend.as_tensor(nparray) # Backend choice utility functions def _is_applicable(backend, values):", "False, False, \"\")) return f_b_losses_np[b], f_grad_np[b] def callback(x, *args): #", "Exception.__init__(self, msg) def default_backend() -> Backend: \"\"\" The default backend", "using the DLPack library. Else, intermediately converts `tensor` to a", "shape: shape: Returns: \"\"\" raise NotImplementedError(self) def coordinates(self, tensor): \"\"\"", "must return a valid NumPy representation of the value. Tensors", "NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def all(self,", "assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 # (batch, parameters) batch_size", "Returns the backend set by the inner-most surrounding `with backend:`", "2) x = x0 dx = residual = y -", "context_backend() -> Backend or None: \"\"\" Returns the backend set", "memory of the device that can be allocated (in bytes).", "Returns: coordinates: `tuple` of tensor holding the coordinate vectors, i.e.", "x: tensor-like, e.g. 
list, tuple, Python number, tensor convert_external: if", "or list): slices = tuple(slice(None, None, -1 if i in", "NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims:", "to handle the given values. This function is used by", "inputs. Args: floating_point_bits: one of (16, 32, 64, None) \"\"\"", "def _is_specific(backend, values): for value in values: if backend.is_tensor(value, only_native=True):", "str, lin, y, x0, rtol, atol, max_iter, trj: bool) ->", "= f\"{self.processor_count} processors\" if self.processor_count > 0 else \"processors: n/a\"", "computations. Args: *values: prefer_default: if True, selects the default backend", "the default backend assuming it can handle handle the values,", "else slice(None) for d in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx", "devices this backend can use. Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) *", "and of the correct floating type, returns a copy of", "as tensor of shape (batch_size, out_channel, spatial...) 
\"\"\" raise NotImplementedError(self)", "{lin_shape}\" return self.matmul(lin, vector) def gradients(self, y, xs: tuple or", "objects that are considered tensors by this backend but are", "Args: k: tensor of dimension 3 or higher Returns: \"\"\"", "b_thread(b=b): recent_b_losses = [] def b_fun(x: numpy.ndarray): function_evaluations[b] += 1", "Args: indices: tuple/list matching the dimensions (pair for matrix) values:", "f_grad_np[b] def callback(x, *args): # L-BFGS-B only passes x but", "self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description, x, residual, iterations, function_evaluations,", "values to this precision, even if the input had a", "def callback(x, *args): # L-BFGS-B only passes x but the", "-> int: \"\"\" Short for math.backend.get_precision() \"\"\" return get_precision() @property", "to_complex(self, x): return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128))))", "evaluation final_losses[b] = f_b_losses[b] if trajectories is not None: trajectories[b].append(SolveResult(method_description,", "true, `numpy(tensor)` must return a valid NumPy representation of the", "any..., channel) \"\"\" raise NotImplementedError(self) def flatten(self, x): return self.reshape(x,", "to. Different backends represent the same device with different objects.", "tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self,", "max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Standard conjugate", "50 == 0: # Not traceable since Python bool #", "{BACKENDS}\") # --- Native tensors? --- for backend in backends:", "See `default_backend()`, `choose_backend()`. Args: backend: `Backend` to set as default", "* dx # if it_counter % 50 == 0: #", "had a different precision. If `floating_point_bits` is None, new tensors", "can override this method to prevent unnecessary casting. 
Args: *tensors:", "def functional_gradient(self, f, wrt: tuple or list, get_output: bool): raise", "x): raise NotImplementedError(self) def log(self, x): \"\"\" Natural logarithm \"\"\"", "+= 1 iterations += continue_1 dx_dy = self.sum(dx * dy,", "return get_precision() @property def float_type(self) -> DType: return DType(float, self.precision)", "linspace(self, start, stop, number): raise NotImplementedError(self) def tensordot(self, a, a_axes:", "`Backend` \"\"\" # --- Default Backend has priority --- if", "a & b def or_(self, a, b): a, b =", "def mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def range(self, start,", "minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res,", "'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ]) class ComputeDevice: \"\"\"", "TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only", "= self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True) method_description = f\"SciPy", "min(self.precision * 2, 128)))) def batched_gather_nd(self, values, indices): \"\"\" Gathers", "values): pass def jit_compile(self, f: Callable) -> Callable: return NotImplemented", "_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged", "std 1. \"\"\" raise NotImplementedError(self) def stack(self, values, axis=0): raise", "by `values`. \"\"\" raise NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False):", "the values, see `default_backend()`. raise_error: Determines the behavior of this", "as_tensor(self, x, convert_external=True): \"\"\" Converts a tensor-like object to the", "from typing import List, Callable import numpy from ._dtype import", "a graph. 
Use `is_available(tensor)` to check if the value can", "as a NumPy array. Args: tensor: backend-compatible tensor Returns: NumPy", "b in range(batch_size): def b_thread(b=b): recent_b_losses = [] def b_fun(x:", "class Backend: def __init__(self, name: str, default_device: ComputeDevice): \"\"\" Backends", "memory \"\"\" Maximum memory of the device that can be", "return impl_fun is not backend_fun def prefers_channels_last(self) -> bool: raise", "not visible.\") @property def complex_type(self) -> DType: return DType(complex, max(64,", "function does not support keyword arguments. \"\"\" return NotImplemented def", "self.device_type: str = device_type \"\"\" Type of device such as", "for lin_i in lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape) ==", "--- backends = [backend for backend in BACKENDS if _is_applicable(backend,", "tensor: backend-compatible tensor Returns: bool \"\"\" raise NotImplementedError() def numpy(self,", "in range(self.ndims(value))) return value[slices] def sum(self, value, axis=None, keepdims=False): raise", "* `sparse_tensor` * `gradients Args: feature: `str` or unbound Backend", "the corresponding values \"\"\" raise NotImplementedError(self) def minimize(self, method: str,", "have the same nonzero locations. * linear function A(x), must", "NotImplementedError(self) def zeros(self, shape, dtype: DType = None): raise NotImplementedError(self)", "the backend operates in eager mode. 
Args: tensor: backend-compatible tensor", "states] diverged = [state.diverged for state in states] trajectory.append(SolveResult(method_description, x,", "x, convert_external=True): \"\"\" Converts a tensor-like object to the native", "value = 'constant') Returns: padded tensor or NotImplemented \"\"\" raise", "tensor of dimension 3 or higher Returns: \"\"\" raise NotImplementedError(self)", "is mutable and of the correct floating type, returns a", "mode: One of ('update', 'add') Returns: Copy of base_grid with", "f_output_available.wait() break _, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64)", "dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx", "return tuple(result) def equal(self, x, y): \"\"\" Element-wise equality check", "precision can be set globally using `set_global_precision()` or locally using", "Args: feature: `str` or unbound Backend method, e.g. `Backend.sparse_tensor` Returns:", "if no backend can handle the given values. \"\"\" def", "list): \"\"\" Multiply-sum-reduce a_axes of a with b_axes of b.", "a // b BACKENDS = [] \"\"\" Global list of", "the current target floating point precision in bits. The precision", "can be read at this point. If true, `numpy(tensor)` must", "tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor)", "class NoBackendFound(Exception): \"\"\" Thrown by `choose_backend` if no backend can", "Args: f: Forward function. gradient: Function for backprop. 
Will be", "transpose(self, tensor, axes): raise NotImplementedError() def random_uniform(self, shape): \"\"\" Float", "feature) return impl_fun is not backend_fun def prefers_channels_last(self) -> bool:", "all_finished = True f_output_available.wait() break _, f_b_losses, f_grad = fg(self.stack(f_inputs))", "t in tensors] result_type = self.combine_types(*dtypes) if result_type.kind in (int,", "bool): tensors = [self.cast(t, result_type) for t in tensors] return", "<filename>phi/math/backend/_backend.py from collections import namedtuple from contextlib import contextmanager from", "device with different objects. \"\"\" def __repr__(self): mem = f\"{(self.memory", "true native tensor representations, not Python numbers or others that", "be set globally using `set_global_default_backend()` and locally using `with backend:`.", "= res.message finished[b] = True while not all_finished: f_input_available.wait() f_output_available.wait()", "\"\"\" Calls `f(*args)` and returns the result. This method may", "`gradients Args: feature: `str` or unbound Backend method, e.g. 
`Backend.sparse_tensor`", "NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax:", "backend_fun = getattr(Backend, feature) impl_fun = getattr(self.__class__, feature) return impl_fun", "dimensions Returns: non-zero multi-indices as tensor of shape (nnz, vector)", "None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, \"\")) return", "y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or", "to this precision, even if the input had a different", "None or dim2 == 1: return dim1 assert dim1 ==", "NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self, capsule): raise", "backend def set_global_precision(floating_point_bits: int): \"\"\" Sets the floating point precision", "tensor with any number of dimensions mask: 1D mask tensor", "for backend in backends: if _is_specific(backend, values): return backend return", "can handle the given values. \"\"\" def __init__(self, msg): Exception.__init__(self,", "lin: Linear operation. One of * sparse/dense matrix valid for", "size. Signature matches to `Backend.linear_solve()`. \"\"\" # Based on the", "either 1D (rank=3), 2D (rank=4) or 3D (rank=5). Higher dimensions", "a node in a graph. Use `is_available(tensor)` to check if", "at values.shape[i]-1. extrapolation: Values to use for coordinates outside the", "a different precision. Returns: 16 for half, 32 for single,", "def stack(self, values, axis=0): raise NotImplementedError(self) def concat(self, values, axis):", "in_channel, spatial...) 
zero_padding: If True, pads the edges of `value`", "y, x0, rtol, atol, max_iter, trj) elif method == 'CG':", "all(finished): all_finished = True f_output_available.wait() break _, f_b_losses, f_grad =", "Returns: Tensor belonging to `backend`. \"\"\" backend = backend or", "b) return a // b BACKENDS = [] \"\"\" Global", "not a struct (e.g. tuple, list) and all methods of", "distribution with mean 0 and std 1. \"\"\" raise NotImplementedError(self)", "d in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if d ==", "Returns: 16 for half, 32 for single, 64 for double", "conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) ->", "def numpy(self, tensor) -> numpy.ndarray: \"\"\" Returns a NumPy representation", "value, kernel, zero_padding=True): \"\"\" Convolve value with kernel. Depending on", "given by multiples. If `multiples` has more dimensions than `value`,", "Callable: return NotImplemented def functional_gradient(self, f, wrt: tuple or list,", "~diverged & (iterations < max_iter) def loop(continue_, it_counter, x, dx,", "NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates", "values: tensor of shape (batch, spatial..., channel) indices: int tensor", "True f_output_available.wait() break _, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np =", "equality check \"\"\" raise NotImplementedError(self) def not_equal(self, x, y): return", "the coordinates and values of a tensor. Args: tensor: Sparse", "There may be objects that are considered tensors by this", "dim2 if dim2 is None or dim2 == 1: return", "or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # --- Filter out non-applicable", "pass def jit_compile(self, f: Callable) -> Callable: return NotImplemented def", "dimension i lies at position 0, the last at values.shape[i]-1.", "list. 
\"\"\" _DEFAULT = [] # [0] = global default,", "def combine_types(self, *dtypes: DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision) def", "tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or", "Depending on `mode`, performs scatter_update or scatter_add. Args: base_grid: Tensor", "Selects a suitable backend to handle the given values. This", "trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b],", "CG-adaptive ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size", "dimensions are added to `value` as outer dimensions. Args: value:", "This method need not provide a gradient for the operation.", "get_precision() -> int: \"\"\" Gets the current target floating point", "for i in range(self.ndims(value))) return value[slices] def sum(self, value, axis=None,", "return values ``` This operation does not support backpropagation. Args:", "iterations of size (batch,) trj: Whether to record and return", "rtol, atol, max_iter, trj) elif method == 'CG': return self.conjugate_gradient(lin,", "keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True)", "a, b): a, b = self.auto_cast(a, b) return a &", "Backend method, e.g. `Backend.sparse_tensor` Returns: Whether the feature is supported.", "for state in states] function_evaluations = [state.function_evaluations for state in", "# L-BFGS-B only passes x but the documentation says (x,", "emulate them. The methods of `Backend` form a comprehensive list", "using `with backend:`. 
Returns: current default `Backend` \"\"\" return _DEFAULT[-1]", "a_axes: tuple or list, b, b_axes: tuple or list): \"\"\"", "DType(complex, max(64, self.precision)) def combine_types(self, *dtypes: DType) -> DType: return", "\"\"\" raise NotImplementedError(self) def ifft(self, k): \"\"\" Computes the n-dimensional", "(Default value = False) Returns: bool: whether `x` is considered", "\"\"\" Global list of all registered backends. Register a `Backend`", "backend:` block. If called outside a backend context, returns `None`.", "# make sure threads exit correctly if trj: max_trajectory_length =", "minimize from threading import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) ==", "to check only_native: If True, only accepts true native tensor", "This operation breaks the automatic differentiation chain. Args: tensor: Native", "current target floating point precision in bits. The precision can", "the automatic differentiation chain. Args: tensor: Native tensor belonging to", "x): return self.cast(x, DType(int, 64)) def to_complex(self, x): return self.cast(x,", "-1) # this is not really necessary but ensures batch-independence", "array) -> DType: raise NotImplementedError(self) def tile(self, value, multiples): \"\"\"", "def __repr__(self): mem = f\"{(self.memory / 1024 ** 2)} MB\"", "backend. backend: Target backend. If `None`, uses the current default", "Callable) -> Callable: return NotImplemented def functional_gradient(self, f, wrt: tuple", "is a Python number (numbers.Number instance), `convert_numbers` decides whether to", "first and last dimensions. Args: k: tensor of dimension 3", "y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0]", "= self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter += 1; iterations +=", "compute the gradient of `f`. 
Returns: Function with similar signature", "condition, x=None, y=None): raise NotImplementedError(self) def nonzero(self, values): \"\"\" Args:", "return x >= y def add(self, a, b): a, b", "_PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend", "t in tensors] return tensors def __str__(self): return self.name def", "list): slices = tuple(slice(None, None, -1 if i in axes", "b): a, b = self.auto_cast(a, b) return a * b", "Conjugate Gradient Method Without the Agonizing Pain\" by <NAME> #", "if self.processor_count > 0 else \"processors: n/a\" descr = self.description.replace('\\n',", "Solve the system of linear equations A · x =", "residual, iterations, function_evaluations, converged, diverged)) return trajectory if trj else", "axis < 0: raise ValueError(\"Illegal axis value\") result = []", "tensor of bool, int or float Returns: Values of `x`", "first dimension of `values` and `indices` is the batch dimension", "function_evaluations, converged, diverged _, _, x, _, _, residual, iterations,", "first evaluation final_losses[b] = f_b_losses[b] if trajectories is not None:", "b): raise NotImplementedError(self) def einsum(self, equation, *tensors): raise NotImplementedError(self) def", "set_global_precision(floating_point_bits: int): \"\"\" Sets the floating point precision of DYNAMIC_BACKEND", "precision as its inputs. Args: floating_point_bits: one of (16, 32,", "Python number (numbers.Number instance), `convert_numbers` decides whether to convert it", "available when the backend operates in eager mode. Args: tensor:", "+= continue_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size", "multiples: tuple or list of integers Returns: tile tensor \"\"\"", "outer dimensions. 
Args: value: tensor multiples: tuple or list of", "the value of the tensor is not known at this", "`Backend` \"\"\" return _DEFAULT[-1] def context_backend() -> Backend or None:", "x, axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x, mask, axis=0):", "the given feature. Features correspond to a method of this", "callable(lin): return lin(vector) elif isinstance(lin, (tuple, list)): for lin_i in", "DType(complex, max(64, min(self.precision * 2, 128)))) def batched_gather_nd(self, values, indices):", "= self.auto_cast(x, y) return x > y def greater_or_equal(self, x,", "= self.auto_cast(a, b) return a ^ b def floordiv(self, a,", "backend.name: return backend raise RuntimeError(f\"Backend '{self}' is not visible.\") @property", "// b BACKENDS = [] \"\"\" Global list of all", "== 1: return dim2 if dim2 is None or dim2", "description \"\"\" Further information about the device such as driver", "= [[] for _ in range(batch_size)] if trj else None", "but the documentation says (x, state) iterations[b] += 1 loss", "= self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj else SolveResult(method, x,", "`BACKENDS`. Args: name: Human-readable string default_device: `ComputeDevice` being used by", "-1)) while ~self.all(finished): it_counter += 1; iterations += not_finished_1 dy", "supports the given feature. Features correspond to a method of", "def as_tensor(self, x, convert_external=True): \"\"\" Converts a tensor-like object to", "If True, raises a `NoBackendFound` error, else returns `None`. Returns:", "from contextlib import contextmanager from threading import Barrier from typing", "self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq =", "`Backend.linear_solve()`. 
\"\"\" # Based on the variant described in \"Methods", "value, axis=None, keepdims=False): raise NotImplementedError(self) def range(self, start, limit=None, delta=1,", "atol, max_iter, trj) elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y,", "def sign(self, x): raise NotImplementedError(self) def round(self, x): raise NotImplementedError(self)", "def set_global_default_backend(backend: Backend): \"\"\" Sets the given backend as default.", "\"\"\" _PRECISION[0] = floating_point_bits def get_precision() -> int: \"\"\" Gets", "This can help prevent type clashes like int32 vs int64.", "tensor of shape (batch_size or 1, out_channel, in_channel, spatial...) zero_padding:", "backends. If `floating_point_bits` is an integer, all floating point tensors", "= None f_b_losses_np = None f_grad_np = None f_input_available =", "1, update_count or 1, channels or 1) mode: One of", "f, x0, atol, max_iter, trj: bool): from scipy.optimize import OptimizeResult,", "functions operating on `Tensor` objects to delegate the actual computations.", "Returns: \"\"\" raise NotImplementedError(self) def where(self, condition, x=None, y=None): raise", "grid at the specified coordinates. Args: grid: Tensor spatial_dims: Dimension", "Native tensor belonging to any registered backend. backend: Target backend.", "a suitable backend to handle the given values. This function", "= self.staticshape(lin) assert len(lin_shape) == 2, f\"A must be a", "the optimization trajectory as a `List[SolveResult]`. 
Returns: result: `SolveResult` or", "last_point in zip(trajectories, last_points)] trajectory = [] for states in", "ensures batch-independence x += step_size * dx if it_counter %", "ComputeDevice): \"\"\" Backends delegate low-level operations to a compute library", "This method raises an error if the value of the", "of size (batch,) trj: Whether to record and return the", "of selected precision containing random values in the range [0,", "based on `f` that uses a custom gradient for backprop.", "values padded to the edges of each axis in the", "without modification. This method raises an error if the value", "iterations += not_finished_1 dy = self.linear(lin, dx); function_evaluations += not_finished_1", "== 0: residual = y - self.linear(lin, x); function_evaluations +=", "raise NotImplementedError(self) def floor(self, x): raise NotImplementedError(self) def max(self, x,", "len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array)) def batch_gather(self, tensor, batches):", "0 and std 1. \"\"\" raise NotImplementedError(self) def stack(self, values,", "sin(self, x): raise NotImplementedError(self) def cos(self, x): raise NotImplementedError(self) def", "values. This function is used by most math functions operating", "True: f_input_available.wait() if all(finished): all_finished = True f_output_available.wait() break _,", "of this function if no backend can handle the given", "Args: tensor: backend-compatible tensor Returns: bool \"\"\" raise NotImplementedError() def", "The default backend can be set globally using `set_global_default_backend()` and", "self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))]) else: lin_shape", "the number as-is. This can help prevent type clashes like", "keyword arguments. 
\"\"\" return NotImplemented def jit_compile_grad(self, f, wrt: tuple", "shape (batch, any..., multi_index) where the size of multi_index is", "x = self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description, x, residual,", "as a tensor argument. Args: x: object to check only_native:", "given values. This function is used by most math functions", "NotImplementedError(self) def not_equal(self, x, y): return ~self.equal(x, y) def greater_than(self,", "'message', ]) class ComputeDevice: \"\"\" A physical device that can", "belongs to. Different backends represent the same device with different", "dim1 == 1: return dim2 if dim2 is None or", "such as `'CPU'`, `'GPU'` or `'TPU'`. \"\"\" self.memory: int =", "backend:`. Returns: current default `Backend` \"\"\" return _DEFAULT[-1] def context_backend()", ">= 1, f\"{self.name}: Cannot select '{device} because no device of", "tensor) -> numpy.ndarray: \"\"\" Returns a NumPy representation of the", "real(self, x): raise NotImplementedError(self) def sin(self, x): raise NotImplementedError(self) def", "NotImplementedError() def call(self, f: Callable, *args, name=None): \"\"\" Calls `f(*args)`", "choose_backend(key).call(custom_function, *args) \"\"\" return f(*args) def block_until_ready(self, values): pass def", "-1 for n/a. \"\"\" self.processor_count: int = processor_count \"\"\" Number", "only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 *", "def choose_backend(*values, prefer_default=False) -> Backend: \"\"\" Selects a suitable backend", "supported. Args: value: tensor of shape (batch_size, in_channel, spatial...) kernel:", "= x0 dx = residual = y - self.linear(lin, x)", "correspond to a method of this backend that must be", "methods of the backend accept it as a tensor argument.", "breaks the automatic differentiation chain. Args: tensor: Native tensor belonging", "n-dimensional inverse FFT along all but the first and last", "as-is. 
This can help prevent type clashes like int32 vs", "i in axes else None) for i in range(self.ndims(value))) return", "out_channel, spatial...) \"\"\" raise NotImplementedError(self) def expand_dims(self, a, axis=0, number=1):", "Args: tensor: backend-compatible tensor Returns: NumPy representation of the values", "self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged = self.any(residual_squared / rsq0 >", "abs(self, x): raise NotImplementedError(self) def sign(self, x): raise NotImplementedError(self) def", "sparse/dense matrix valid for all instances * tuple/list of sparse/dense", "NotImplementedError(self) def coordinates(self, tensor): \"\"\" Returns the coordinates and values", "in states] converged = [state.converged for state in states] diverged", "or higher Returns: \"\"\" raise NotImplementedError(self) def ifft(self, k): \"\"\"", "to consider when determining the common data type Returns: tensors", "minimum(self, a, b): raise NotImplementedError(self) def clip(self, x, minimum, maximum):", "bool) -> SolveResult or List[SolveResult]: \"\"\" Conjugate gradient algorithm with", "dx # if it_counter % 50 == 0: # Not", "of math operations has the same precision as its inputs.", "\"\"\" Gets the current target floating point precision in bits.", "tuple): \"\"\" ```python while any(values[0]): values = loop(*values) return values", "str: (Default value = 'constant') Returns: padded tensor or NotImplemented", "in trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b],", "state in states]) iterations = [state.iterations for state in states]", "result has the same shape as `value`. Returns: Convolution result", "trajectory as a `List[SolveResult]`. 
Returns: result: `SolveResult` or `List[SolveResult]`, depending", "= residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx,", "return self.matmul(lin, vector) def gradients(self, y, xs: tuple or list,", "def or_(self, a, b): a, b = self.auto_cast(a, b) return", "constant_values: used for out-of-bounds points if mode='constant' (Default value =", "`x` is a Python number that is understood by this", "def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool): raise", "max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Conjugate gradient", "method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) # res.nit,", "be called on all instances in parallel y: target result", "numbers. *Note:* There may be objects that are considered tensors", "backend, see `default_backend()`. Returns: Tensor belonging to `backend`. \"\"\" backend", "y): return ~self.equal(x, y) def greater_than(self, x, y): x, y", "the first and last dimensions. Args: x: tensor of dimension", "xor(self, a, b): a, b = self.auto_cast(a, b) return a", "non-applicable --- backends = [backend for backend in BACKENDS if", "def batch_gather(self, tensor, batches): if isinstance(batches, int): batches = [batches]", "Register a `Backend` by adding it to the list. \"\"\"", "with precision equal to the currently set default precision. See", "trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) +", "= [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1)", "this device belongs to. Different backends represent the same device", "wrt: tuple or list, get_output: bool): raise NotImplementedError() def transpose(self,", "0 \"\"\" raise NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self) def", "given feature. 
Features correspond to a method of this backend", "raise NotImplementedError(self) def scatter(self, base_grid, indices, values, mode: str): \"\"\"", "= self.list_devices(device) assert len(devices) >= 1, f\"{self.name}: Cannot select '{device}", "Gathered values as tensor of shape (batch, any..., channel) \"\"\"", "parameters) batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True) method_description", "converged = [state.converged for state in states] diverged = [state.diverged", "bool # residual = y - self.linear(lin, x); function_evaluations +=", "backend can handle the given values. \"\"\" def __init__(self, msg):", "= self.auto_cast(x, y) return x >= y def add(self, a,", "return ~self.equal(x, y) def greater_than(self, x, y): x, y =", "* batch_size messages = [\"\"] * batch_size f_inputs = [None]", "tensors will default to float32 unless specified otherwise. The output", "it is returned without modification. If x is a Python", "f_grad_np = None f_input_available = Barrier(batch_size + 1) f_output_available =", "use. Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow:", "`x` as float tensor \"\"\" return self.cast(x, self.float_type) def to_int32(self,", "use. One of `('auto', 'CG', 'CG-adaptive')`. lin: Linear operation. One", "called as `gradient(*d_out)` to compute the gradient of `f`. Returns:", "col) for matrices. indices: Tensor holding the corresponding values \"\"\"", "k): \"\"\" Computes the n-dimensional inverse FFT along all but", "tuple or list, get_output: bool): raise NotImplementedError() def transpose(self, tensor,", "computations. 
\"\"\" def __init__(self, backend: 'Backend', name: str, device_type: str,", "fft(self, x): \"\"\" Computes the n-dimensional FFT along all but", "if result_type.kind in (int, float, complex, bool): tensors = [self.cast(t,", "called on all instances in parallel y: target result of", "interpolation \"\"\" return NotImplemented def variable(self, value): return NotImplemented def", "y: Returns: \"\"\" raise NotImplementedError(self) def where(self, condition, x=None, y=None):", "algorithm. Signature matches to `Backend.linear_solve()`. \"\"\" # Based on \"An", "atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Conjugate", "divisor = self.auto_cast(dividend, divisor) return dividend % divisor def and_(self,", "backend:`. See `default_backend()`, `choose_backend()`. Args: backend: `Backend` to set as", "same precision as its inputs. Args: floating_point_bits: one of (16,", "atol: Absolute tolerance of size (batch,) max_iter: Maximum number of", "indices, values, shape): \"\"\" Optional features. Args: indices: tuple/list matching", "Fetches information about all available compute devices this backend can", "all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def fft(self, x): \"\"\"", "final_losses[b] is None: # first evaluation final_losses[b] = f_b_losses[b] if", "backend, it is returned without modification. If x is a", "trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Standard conjugate gradient", "last dimension must match `spatial_dims`. 
The first grid point of", "backend can be set globally using `set_global_default_backend()` and locally using", "active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj else SolveResult(method,", "y, x0, rtol, atol, max_iter, trj) else: raise NotImplementedError(f\"Method '{method}'", "of shape (batch_size or 1, update_count or 1, channels or", "(prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # --- Filter out", "`indices` updated by `values`. \"\"\" raise NotImplementedError(self) def any(self, boolean_tensor,", "\"\"\" raise NotImplementedError(self) def sparse_tensor(self, indices, values, shape): \"\"\" Optional", "* batch_size function_evaluations = [0] * batch_size xs = [None]", "keepdims=True) * dx, dx_dy) dy = self.linear(lin, dx); function_evaluations +=", "the device that can be allocated (in bytes). -1 for", "minimum, maximum): raise NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self) def", "return True return False # Other low-level helper functions def", "shape(self, tensor): raise NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self) def", "to scatter at indices. Tensor of shape (batch_size or 1,", "raise NotImplementedError() @property def precision(self) -> int: \"\"\" Short for", "or otherwise) by a backend if it is not a", "delta=1, dtype: DType = DType(int, 32)): raise NotImplementedError(self) def zeros(self,", "`List[SolveResult]`, depending on `trj`. \"\"\" if method == 'auto': return", "bits. 
The precision can be set globally using `set_global_precision()` or", "in \"Methods of Conjugate Gradients for Solving Linear Systems\" by", "log(self, x): \"\"\" Natural logarithm \"\"\" raise NotImplementedError(self) def log2(self,", "return a - b def mul(self, a, b): a, b", "NotImplementedError(self) def cast(self, x, dtype: DType): raise NotImplementedError(self) def to_float(self,", "trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Conjugate gradient algorithm", "= current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice utility functions def", "-> SolveResult or List[SolveResult]: \"\"\" Standard conjugate gradient algorithm. Signature", "where(self, condition, x=None, y=None): raise NotImplementedError(self) def nonzero(self, values): \"\"\"", "form a comprehensive list of available operations. To support a", "NotImplementedError(self) def stack(self, values, axis=0): raise NotImplementedError(self) def concat(self, values,", "\"\"\" raise NotImplementedError(self) def log2(self, x): raise NotImplementedError(self) def log10(self,", "None and dim2 is None: return None if dim1 is", "and can be read at this point. If true, `numpy(tensor)`", "NotImplementedError(self) def mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def range(self,", "of bool, int or float Returns: Values of `x` as", "by `choose_backend()`. The default backend can be set globally using", "does not support backpropagation. Args: loop: Loop function, must return", "'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ]) class ComputeDevice:", "only passes x but the documentation says (x, state) iterations[b]", "lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult", "2. Returns: Gathered values as tensor of shape (batch, any...,", "% 50 == 0: # Not traceable since Python bool", "called outside a backend context, returns `None`. 
Returns: `Backend` or", "can be set globally using `set_global_precision()` or locally using `with", "or default_backend() current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or", "\"\"\" An object is considered a native tensor by a", "operates in eager mode. Args: tensor: backend-compatible tensor Returns: bool", "One of ('update', 'add') Returns: Copy of base_grid with values", "self.staticshape(lin_i) assert len(lin_shape) == 2 return self.stack([self.matmul(m, v) for m,", "step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this", "# --- Filter out non-applicable --- backends = [backend for", "output of math operations has the same precision as its", "backend: Target backend. If `None`, uses the current default backend,", "std(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x, mask,", "== 2 # (batch, parameters) batch_size = self.staticshape(x0)[0] fg =", "NotImplementedError(self) def log2(self, x): raise NotImplementedError(self) def log10(self, x): raise", "b): a, b = self.auto_cast(a, b) return a & b", "by adding it to the list. \"\"\" _DEFAULT = []", "vectors, i.e. (row, col) for matrices. indices: Tensor holding the", "keepdims: component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d ==", "sparse/dense matrices for varying matrices along batch, must have the", "iterations, function_evaluations, converged, diverged, \"\")] if trj else None continue_", "Relative tolerance of size (batch,) atol: Absolute tolerance of size", "device_type \"\"\" Type of device such as `'CPU'`, `'GPU'` or", "the range [0, 1) \"\"\" raise NotImplementedError(self) def random_normal(self, shape):", "point precision of DYNAMIC_BACKEND which affects all registered backends. 
If", "vector) def gradients(self, y, xs: tuple or list, grad_y) ->", "'diverged', 'message', ]) class ComputeDevice: \"\"\" A physical device that", "any registered backend. backend: Target backend. If `None`, uses the", "Barrier from typing import List, Callable import numpy from ._dtype", "higher Returns: \"\"\" raise NotImplementedError(self) def ifft(self, k): \"\"\" Computes", "`SolveResult` or `List[SolveResult]`, depending on `trj`. \"\"\" if method ==", "`'CPU'`, `'GPU'` or `'TPU'`. \"\"\" self.memory: int = memory \"\"\"", "last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], \"\")", "known at this point, e.g. because it represents a node", "loop: Loop function, must return a `tuple` with entries equal", "tensor of shape (batch_size, out_channel, spatial...) \"\"\" raise NotImplementedError(self) def", "* TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return", "- self.linear(lin, x); function_evaluations += 1 else: residual = residual", "outside the grid. One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric',", "appropriate values type resulting from operations involving the tensors as", "a copy of `x`. To convert float tensors to the", "dtype: DType = DType(int, 32)): raise NotImplementedError(self) def zeros(self, shape,", "NotImplementedError(self) def linspace(self, start, stop, number): raise NotImplementedError(self) def tensordot(self,", "kernel, zero_padding=True): \"\"\" Convolve value with kernel. Depending on the", "residual = y - self.linear(lin, x); function_evaluations += 1 #", "belonging to `backend`. \"\"\" backend = backend or default_backend() current_backend", "dtypes = [self.dtype(t) for t in tensors] result_type = self.combine_types(*dtypes)", "multiples. 
If `multiples` has more dimensions than `value`, these dimensions", "recent_b_losses = [] def b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b]", "step_size * dx if it_counter % 50 == 0: residual", "Backends can override this method to prevent unnecessary casting. Args:", "x): raise NotImplementedError(self) def log10(self, x): raise NotImplementedError(self) def dtype(self,", "delegate the actual computations. Args: *values: prefer_default: if True, selects", "self._name def supports(self, feature: str or Callable) -> bool: \"\"\"", "`x` \"\"\" raise NotImplementedError() def is_available(self, tensor) -> bool: \"\"\"", "to a NumPy array. *Warning*: This operation breaks the automatic", "f: Forward function. gradient: Function for backprop. Will be called", "BACKENDS for backend in BACKENDS: if self.name in backend.name: return", "- self.linear(lin, x) dy = self.linear(lin, dx) iterations = self.zeros([batch_size],", "> 0 else \"memory: n/a\" pro = f\"{self.processor_count} processors\" if", "but ensures batch-independence x += step_size * dx # if", "\"\"\" return _DEFAULT[-1] if len(_DEFAULT) > 1 else None def", "\"\"\" Returns the backend set by the inner-most surrounding `with", "else feature.__name__ if not hasattr(Backend, feature): raise ValueError(f\"Not a valid", "the number of values padded to the edges of each", "name: str, default_device: ComputeDevice): \"\"\" Backends delegate low-level operations to", "isinstance(lin, (tuple, list)): for lin_i in lin: lin_shape = self.staticshape(lin_i)", "/ 1024 ** 2)} MB\" if self.memory > 0 else", "def not_equal(self, x, y): return ~self.equal(x, y) def greater_than(self, x,", "Thrown by `choose_backend` if no backend can handle the given", "def abs(self, x): raise NotImplementedError(self) def sign(self, x): raise NotImplementedError(self)", "of integers Returns: tile tensor \"\"\" raise NotImplementedError(self) def sparse_tensor(self,", "target backend can operate natively on `tensor`, returns 
`tensor`. If", "# [0] = global precision in bits, [1:] from 'with'", "[state.iterations for state in states] function_evaluations = [state.function_evaluations for state", "residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True) diverged", "local context. Usage: `with precision(p):` This overrides the global setting,", "b = self.auto_cast(a, b) return a * b def div(self,", "about all available compute devices this backend can use. Implementations:", "iterations = [0] * batch_size function_evaluations = [0] * batch_size", "value): return NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor)) def size(self,", "_is_specific(backend, values): for value in values: if backend.is_tensor(value, only_native=True): return", "get_output: bool): raise NotImplementedError() def transpose(self, tensor, axes): raise NotImplementedError()", "dim1 == dim2, f\"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}\"", "residual_squared residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx =", "NotImplementedError() def random_uniform(self, shape): \"\"\" Float tensor of selected precision", "as its inputs. Args: floating_point_bits: one of (16, 32, 64,", "raise NotImplementedError(self) def mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def", "in range(batch_size)] if trj else None threads = [] for", "step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary", "axis 0 upper], ...] including batch and component axes. 
mode:", "= self.stack(final_losses) return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged,", "batch-independence x += step_size * dx if it_counter % 50", "this backend that must be implemented if the feature is", "Repeats the tensor along each axis the number of times", "default_backend() -> Backend: \"\"\" The default backend is preferred by", "# Other low-level helper functions def combined_dim(dim1, dim2, type_str: str", "If True, pads the edges of `value` with zeros so", "= self.auto_cast(base, exp) return base ** exp def mod(self, dividend,", "methods of `Backend` form a comprehensive list of available operations.", "= [] # [0] = global default, [1:] from 'with'", "shape and data type. values: Initial values of loop variables.", "\"\"\" Name of the compute device. CPUs are typically called", "if the feature is supported. Possible features: * `sparse_tensor` *", "and return values as `f`. However, the returned function does", "None: return None if dim1 is None or dim1 ==", "_PRECISION[-1] @contextmanager def precision(floating_point_bits: int): \"\"\" Sets the floating point", "size of multi_index is values.rank - 2. Returns: Gathered values", "assert len(self.staticshape(x0)) == 2 # (batch, parameters) batch_size = self.staticshape(x0)[0]", "backend can handle the given values. If True, raises a", "1: return dim2 if dim2 is None or dim2 ==", "x but the documentation says (x, state) iterations[b] += 1", "None f_grad_np = None f_input_available = Barrier(batch_size + 1) f_output_available", "the behavior of this function if no backend can handle", "is returned without modification. If x is a Python number", "residual = y - self.linear(lin, x) dy = self.linear(lin, dx)", "Args: x: tensor of dimension 3 or higher Returns: \"\"\"", "raise NotImplementedError(self) def sparse_tensor(self, indices, values, shape): \"\"\" Optional features.", "it is returned without modification. 
This method raises an error", "the n-dimensional inverse FFT along all but the first and", "`ComputeDevice` being used by default \"\"\" self._name = name self._default_device", "zeros_like(self, tensor): raise NotImplementedError(self) def ones(self, shape, dtype: DType =", "def maximum(self, a, b): raise NotImplementedError(self) def minimum(self, a, b):", "NotImplementedError(self) def expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self) def shape(self,", "a regular grid at the specified coordinates. Args: grid: Tensor", "`values` at locations `indices`. The first dimension of `values` and", "raise NotImplementedError(self) def tan(self, x): raise NotImplementedError(self) def log(self, x):", "to `value` as outer dimensions. Args: value: tensor multiples: tuple", "raise NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates):", "b): raise NotImplementedError(self) def minimum(self, a, b): raise NotImplementedError(self) def", "residual, axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) # this", "matches to `Backend.linear_solve()`. \"\"\" # Based on \"An Introduction to", "random_normal(self, shape): \"\"\" Float tensor of selected precision containing random", "Args: x: tensor-like, e.g. list, tuple, Python number, tensor convert_external:", "\"\"\" raise NotImplementedError(self) def reshape(self, value, shape): raise NotImplementedError(self) def", "use `Backend.as_tensor()`. Args: x: tensor of bool, int or float", "dimensions. 
Args: k: tensor of dimension 3 or higher Returns:", "str = description \"\"\" Further information about the device such", "v in zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert len(lin_shape)", "f\"Φ-Flow CG ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True)", "more dimensions than `value`, these dimensions are added to `value`", "def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self) ->", "gradient for the operation. Args: method: Which algorithm to use.", "or list, grad_y) -> tuple: raise NotImplementedError(self) def record_gradients(self, xs:", "self.precision) @property def as_registered(self) -> 'Backend': from phi.math.backend import BACKENDS", "list)): for lin_i in lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape)", "1 else: residual = residual - step_size * dy #", "Conjugate gradient algorithm with adaptive step size. Signature matches to", "This method may be used to register internal calls with", "backend. If x is a native tensor of this backend,", "b BACKENDS = [] \"\"\" Global list of all registered", "rsq0 = self.sum(residual ** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x),", "-> DType: return DType(complex, max(64, self.precision)) def combine_types(self, *dtypes: DType)", "bool: whether `x` is considered a tensor by this backend", "(continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged))", "bool \"\"\" raise NotImplementedError() def numpy(self, tensor) -> numpy.ndarray: \"\"\"", "trajectory is not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged,", "* 2, 128)))) def batched_gather_nd(self, values, indices): \"\"\" Gathers values", "result_type.kind in (int, float, complex, bool): tensors = [self.cast(t, result_type)", "32, 64, None) \"\"\" _PRECISION[0] = floating_point_bits def get_precision() ->", "it represents a node in a graph. 
Use `is_available(tensor)` to", "\"\"\" Converts a tensor-like object to the native tensor representation", "axis=None, keepdims=False): raise NotImplementedError(self) def min(self, x, axis=None, keepdims=False): raise", "Tensor holding the corresponding values \"\"\" raise NotImplementedError(self) def minimize(self,", "trajectory = [] for states in zip(*trajectories): x = self.stack([self.to_float(state.x)", "value: tensor multiples: tuple or list of integers Returns: tile", "diverged, \"\") def linear(self, lin, vector): if callable(lin): return lin(vector)", "residual_squared=delta, residual=r, y=b method = f\"Φ-Flow CG ({self.name})\" y =", "the number of times given by multiples. If `multiples` has", "Function for backprop. Will be called as `gradient(*d_out)` to compute", "self.copy(iterations) continue_ = ~converged & ~diverged & (iterations < max_iter)", "def convert(tensor, backend: Backend = None, use_dlpack=True): \"\"\" Convert a", "If `None`, uses the current default backend, see `default_backend()`. Returns:", "NotImplementedError(self) def nonzero(self, values): \"\"\" Args: values: Tensor with only", "Pad a tensor with values as specified by `mode` and", "a_axes of a with b_axes of b. \"\"\" raise NotImplementedError(self)", "backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice", "raise NotImplementedError(self) def to_float(self, x): \"\"\" Converts a tensor to", "index >= 0 \"\"\" raise NotImplementedError(self) def isfinite(self, x): raise", "tensor[batches, ...] def unstack(self, tensor, axis=0, keepdims=False) -> tuple: if", "raise NotImplementedError(self) def random_normal(self, shape): \"\"\" Float tensor of selected", "* `gradients Args: feature: `str` or unbound Backend method, e.g.", "object to the native tensor representation of this backend. If", "from collections import namedtuple from contextlib import contextmanager from threading", "chain. 
Args: tensor: Native tensor belonging to any registered backend.", "If True, only accepts true native tensor representations, not Python", "including batch and component axes. mode: constant', 'boundary', 'periodic', 'symmetric',", "method of this backend that must be implemented if the", "only_mutable=False): raise NotImplementedError() def call(self, f: Callable, *args, name=None): \"\"\"", "DType = DType(int, 32)): raise NotImplementedError(self) def zeros(self, shape, dtype:", "backends are {BACKENDS}\") # --- Native tensors? --- for backend", "self.auto_cast(a, b) return a // b BACKENDS = [] \"\"\"", "in zip(trajectories, last_points)] trajectory = [] for states in zip(*trajectories):", "floating_point_bits: one of (16, 32, 64, None) \"\"\" _PRECISION[0] =", "in tensors] result_type = self.combine_types(*dtypes) if result_type.kind in (int, float,", "tensors] return tensors def __str__(self): return self.name def __repr__(self): return", "floating_point_bits def get_precision() -> int: \"\"\" Gets the current target", "\"\"\" Further information about the device such as driver version.", "= 0 iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size],", "type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of", "f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads: b_thread.join() #", "handle the given values. \"\"\" def __init__(self, msg): Exception.__init__(self, msg)", "only_native=True): return True return False # Other low-level helper functions", "bool) -> SolveResult or List[SolveResult]: \"\"\" Standard conjugate gradient algorithm.", "this point, e.g. 
because it represents a node in a", "[SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")] if trj", "*tensors): raise NotImplementedError(self) def while_loop(self, loop: Callable, values: tuple): \"\"\"", "NotImplementedError(self) def floor(self, x): raise NotImplementedError(self) def max(self, x, axis=None,", "[] for slice_idx in range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx,", "a function based on `f` that uses a custom gradient", "self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: #", "type is available.\" device = devices[0] self._default_device = device def", "that are considered tensors by this backend but are not", "assert len(lin_shape) == 2 return self.stack([self.matmul(m, v) for m, v", "as float tensor \"\"\" return self.cast(x, self.float_type) def to_int32(self, x):", "value\") result = [] for slice_idx in range(tensor.shape[axis]): if keepdims:", "\"\"\" _DEFAULT = [] # [0] = global default, [1:]", "any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor, axis=None,", "if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1]", "Features correspond to a method of this backend that must", "def mod(self, dividend, divisor): dividend, divisor = self.auto_cast(dividend, divisor) return", "Values to scatter at indices. Tensor of shape (batch_size or", "atol, max_iter, trj) else: raise NotImplementedError(f\"Method '{method}' not supported for", "returns NotImplemented. Args: value: tensor pad_width: 2D tensor specifying the", "\"\"\" Float tensor of selected precision containing random values in", "return self.cast(x, self.float_type) def to_int32(self, x): return self.cast(x, DType(int, 32))", "by adding it to `BACKENDS`. 
Args: name: Human-readable string default_device:", "returns the result. This method may be used to register", "to_float(self, x): \"\"\" Converts a tensor to floating point values", "channel) \"\"\" raise NotImplementedError(self) def flatten(self, x): return self.reshape(x, (-1,))", "tensor: backend-compatible tensor Returns: NumPy representation of the values stored", "if all(finished): all_finished = True f_output_available.wait() break _, f_b_losses, f_grad", "= self.stack([self.to_float(state.x) for state in states]) residual = self.stack([state.residual for", "f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64)", "'CG', 'CG-adaptive')`. lin: Linear operation. One of * sparse/dense matrix", "result of A * x. 2nd order tensor (batch, vector)", "first and last dimensions. Args: x: tensor of dimension 3", "b in range(batch_size)] trajectories = [t[:-1] + [last_point] * (max_trajectory_length", "type Returns: tensors cast to a common data type \"\"\"", "raise NotImplementedError(self) def while_loop(self, loop: Callable, values: tuple): \"\"\" ```python", "(batch, spatial..., channel) indices: int tensor of shape (batch, any...,", "Maximum number of iterations of size (batch,) trj: Whether to", "function_evaluations, converged, diverged)) return trajectory if trj else SolveResult(method, x,", "self.auto_cast(a, b) return a | b def xor(self, a, b):", "continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged", "b): a, b = self.auto_cast(a, b) return a | b", "iterations, function_evaluations, converged, diverged =\\ self.while_loop(loop, (continue_, 0, x, dx,", "set_default_device(self, device: ComputeDevice or str): if isinstance(device, str): devices =", "the tensors as input. 
This method is called by the", "Backend: def __init__(self, name: str, default_device: ComputeDevice): \"\"\" Backends delegate", "gradient: Function for backprop. Will be called as `gradient(*d_out)` to", "or one for either. Args: values: tensor of shape (batch,", "messages[b] = res.message finished[b] = True while not all_finished: f_input_available.wait()", "of loop variables. Returns: Loop variables upon loop completion. \"\"\"", "it to `BACKENDS`. Args: name: Human-readable string default_device: `ComputeDevice` being", "-> tuple: if axis < 0: axis += len(tensor.shape) if", "shape, dtype: DType = None): raise NotImplementedError(self) def zeros_like(self, tensor):", "x: tensor of bool, int or float Returns: Values of", "values are inserted at indices. Tensor of shape (batch_size, spatial...,", "* batch_size f_b_losses = None f_b_losses_np = None f_grad_np =", "iterations, function_evaluations, converged, diverged)) return trajectory if trj else SolveResult(method,", "v) for m, v in zip(lin, self.unstack(vector))]) else: lin_shape =", "for half, 32 for single, 64 for double \"\"\" return", "\"\"\" Interpolates a regular grid at the specified coordinates. Args:", "base ** exp def mod(self, dividend, divisor): dividend, divisor =", "for single, 64 for double \"\"\" return _PRECISION[-1] @contextmanager def", "to delegate the actual computations. Args: *values: prefer_default: if True,", "`with backend:`. See `default_backend()`, `choose_backend()`. Args: backend: `Backend` to set", "x: tensor of dimension 3 or higher Returns: \"\"\" raise", "device_type: str or None = None) -> List[ComputeDevice]: \"\"\" Fetches", "y - self.linear(lin, x) dy = self.linear(lin, dx) iterations =", "Loop function, must return a `tuple` with entries equal to", "axis=None, keepdims=False): raise NotImplementedError(self) def fft(self, x): \"\"\" Computes the", "is not supported, returns NotImplemented. Args: value: tensor pad_width: 2D", "of a tensor. 
Args: tensor: Sparse tensor Returns: coordinates: `tuple`", "method = f\"Φ-Flow CG ({self.name})\" y = self.to_float(y) x0 =", "x): raise NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self) def floor(self,", "state in states]) residual = self.stack([state.residual for state in states])", "raise NotImplementedError(self) def conv(self, value, kernel, zero_padding=True): \"\"\" Convolve value", "with kernel. Depending on the tensor rank, the convolution is", "< max_iter) return continue_, it_counter, x, dx, dy, residual, iterations,", "feature is supported. \"\"\" feature = feature if isinstance(feature, str)", "or List[SolveResult]: \"\"\" Solve the system of linear equations A", "Sets the given backend as default. This setting can be", "tensor, only_mutable=False): raise NotImplementedError() def call(self, f: Callable, *args, name=None):", "= self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations =", "convert floating point values to this precision, even if the", "f\"{(self.memory / 1024 ** 2)} MB\" if self.memory > 0", "batches = [batches] return tensor[batches, ...] def unstack(self, tensor, axis=0,", "can use. 
Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) *", "[None] * batch_size final_losses = [None] * batch_size converged =", "function_evaluations, converged, diverged =\\ self.while_loop(loop, (continue_, 0, x, dx, dy,", "when determining the common data type Returns: tensors cast to", "def staticshape(self, tensor): raise NotImplementedError(self) def cast(self, x, dtype: DType):", "NumPy representation of the values stored in the tensor \"\"\"", "NotImplementedError(self) def real(self, x): raise NotImplementedError(self) def sin(self, x): raise", "result.append(component) return tuple(result) def equal(self, x, y): \"\"\" Element-wise equality", "a - b def mul(self, a, b): a, b =", "`'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of all currently available devices.", "objects to delegate the actual computations. Args: *values: prefer_default: if", "which affects all registered backends. If `floating_point_bits` is an integer,", "by multiples. If `multiples` has more dimensions than `value`, these", "sure threads exit correctly if trj: max_trajectory_length = max([len(t) for", "list, get_output: bool): raise NotImplementedError() def transpose(self, tensor, axes): raise", "form [[axis 0 lower, axis 0 upper], ...] including batch", "NumPy representation of the value. Tensors are typically available when", "global setting, see `set_global_precision()`. 
Args: floating_point_bits: 16 for half, 32", "NotImplementedError(self) def pad(self, value, pad_width, mode: str = 'constant', constant_values=0):", "raise NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self) def floor(self, x):", "self.auto_cast(x, y) return x > y def greater_or_equal(self, x, y):", "get_output=True) method_description = f\"SciPy {method} with {self.name}\" iterations = [0]", "all instances * tuple/list of sparse/dense matrices for varying matrices", "DType(float, self.precision) @property def as_registered(self) -> 'Backend': from phi.math.backend import", "log2(self, x): raise NotImplementedError(self) def log10(self, x): raise NotImplementedError(self) def", "method == 'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter,", "if self.name in backend.name: return backend raise RuntimeError(f\"Backend '{self}' is", "sampled values with linear interpolation \"\"\" return NotImplemented def variable(self,", "self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) elif method ==", "for v in values]}; registered backends are {BACKENDS}\") # ---", "ref \"\"\" (Optional) Reference to the internal device representation. \"\"\"", "> y def greater_or_equal(self, x, y): x, y = self.auto_cast(x,", "its inputs. Args: floating_point_bits: one of (16, 32, 64, None)", "is current_backend: return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack):", "self.name def list_devices(self, device_type: str or None = None) ->", "Tests if this backend supports the given feature. 
Features correspond", "raise NotImplementedError(self) def ones(self, shape, dtype: DType = None): raise", "x = self.copy(x) iterations = self.copy(iterations) finished = converged |", "only accepts true native tensor representations, not Python numbers or", "b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if all(finished):", "trj: Whether to record and return the optimization trajectory as", "not all_finished: f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while", "SolveResult or List[SolveResult]: \"\"\" Solve the system of linear equations", "to perform backend computations. \"\"\" def __init__(self, backend: 'Backend', name:", "involving the tensors as input. This method is called by", "stored in the tensor \"\"\" raise NotImplementedError() def to_dlpack(self, tensor):", "** 2) x = x0 dx = residual = y", "data type, float16, float32 or float64. Operations may also convert", "bool): raise NotImplementedError(self) def custom_gradient(self, f: Callable, gradient: Callable) ->", "return len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array)) def batch_gather(self, tensor,", "1, out_channel, in_channel, spatial...) zero_padding: If True, pads the edges", "`x` is considered a tensor by this backend \"\"\" raise", "def random_uniform(self, shape): \"\"\" Float tensor of selected precision containing", "```python while any(values[0]): values = loop(*values) return values ``` This", "return self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if isinstance(batches, int): batches", "def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool)", "on the variant described in \"Methods of Conjugate Gradients for", "that the result has the same shape as `value`. 
Returns:", "tensor by a backend if no internal conversion is required", "\"\"\" Optional features. Args: indices: tuple/list matching the dimensions (pair", "be a matrix but got shape {lin_shape}\" return self.matmul(lin, vector)", "y=b method = f\"Φ-Flow CG ({self.name})\" y = self.to_float(y) x0", "and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive ({self.name})\" y", "not_finished_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size =", "--- if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)): return", "self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads: b_thread.join() # make sure", "return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) -> list: \"\"\" Determins", "had a different precision. Returns: 16 for half, 32 for", "it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged): continue_1", "def fft(self, x): \"\"\" Computes the n-dimensional FFT along all", "unless specified otherwise. The output of math operations has the", "register internal calls with the profiler. Usage: choose_backend(key).call(custom_function, *args) \"\"\"", "self.memory: int = memory \"\"\" Maximum memory of the device", "1D (rank=3), 2D (rank=4) or 3D (rank=5). Higher dimensions may", "raise NotImplementedError(self) def not_equal(self, x, y): return ~self.equal(x, y) def", "axes else None) for i in range(self.ndims(value))) return value[slices] def", "= [] def b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] =", "[] \"\"\" Global list of all registered backends. Register a", "handle handle the values, see `default_backend()`. raise_error: Determines the behavior", "of the value. 
Tensors are typically available when the backend", "def as_registered(self) -> 'Backend': from phi.math.backend import BACKENDS for backend", "List[ComputeDevice]: \"\"\" Fetches information about all available compute devices this", "the size of multi_index is values.rank - 2. Returns: Gathered", "choice utility functions def _is_applicable(backend, values): for value in values:", "accepts true native tensor representations, not Python numbers or others", "support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack", "holding the coordinate vectors, i.e. (row, col) for matrices. indices:", "a tensor with values as specified by `mode` and `constant_values`.", "def expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self) def shape(self, tensor):", "dimensions than `value`, these dimensions are added to `value` as", "self.ref = ref \"\"\" (Optional) Reference to the internal device", "subtraction affects convergence residual_squared_old = residual_squared residual_squared = self.sum(residual **", "data type \"\"\" dtypes = [self.dtype(t) for t in tensors]", "> 100, axis=(1,)) & (iterations >= 8) converged = self.all(residual_squared", "zeros(self, shape, dtype: DType = None): raise NotImplementedError(self) def zeros_like(self,", "[False] * batch_size messages = [\"\"] * batch_size f_inputs =", "that can be allocated (in bytes). -1 for n/a. \"\"\"", "[`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only devices of this type,", "round(self, x): raise NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self) def", "methods. An object is considered a tensor (nativer or otherwise)", "'boundary', 'periodic', 'symmetric', 'reflect')`. 
Returns: sampled values with linear interpolation", "shape): raise NotImplementedError(self) def flip(self, value, axes: tuple or list):", "--- Default Backend has priority --- if _is_applicable(_DEFAULT[-1], values) and", "as `gradient(*d_out)` to compute the gradient of `f`. Returns: Function", "1 # else: residual = residual - step_size * dy", "raise NotImplementedError(self) def cast(self, x, dtype: DType): raise NotImplementedError(self) def", "(-1,)) def std(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self,", "of multi_index is values.rank - 2. Returns: Gathered values as", "see `default_backend()`. raise_error: Determines the behavior of this function if", "None, -1 if i in axes else None) for i", "method need not provide a gradient for the operation. Args:", "False, \"\")) return f_b_losses_np[b], f_grad_np[b] def callback(x, *args): # L-BFGS-B", "-> List[ComputeDevice]: \"\"\" Fetches information about all available compute devices", "`floating_point_bits` is None, new tensors will default to float32 unless", "representations, not Python numbers or others that are also supported", "is not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged,", "'CG-adaptive')`. lin: Linear operation. One of * sparse/dense matrix valid", "returns a copy of `x`. To convert float tensors to", "x = y. This method need not provide a gradient", "backend \"\"\" Backend that this device belongs to. Different backends", "x, y): \"\"\" Element-wise equality check \"\"\" raise NotImplementedError(self) def", "return dim1 assert dim1 == dim2, f\"Incompatible {type_str} dimensions: x0", "are considered tensors by this backend but are not native", "the last at values.shape[i]-1. extrapolation: Values to use for coordinates", "available operations. 
To support a compute library, subclass `Backend` and", "for states in zip(*trajectories): x = self.stack([self.to_float(state.x) for state in", "]) class ComputeDevice: \"\"\" A physical device that can be", "\"\"\" raise NotImplementedError() def get_default_device(self) -> ComputeDevice: return self._default_device def", "If `floating_point_bits` is None, new tensors will default to float32", "called `'CPU'`. \"\"\" self.device_type: str = device_type \"\"\" Type of", "the selected `Backend` \"\"\" # --- Default Backend has priority", "diverged, \"\")) x = self.copy(x) iterations = self.copy(iterations) finished =", "(batch,) atol: Absolute tolerance of size (batch,) max_iter: Maximum number", "scipy.optimize import OptimizeResult, minimize from threading import Thread assert self.supports(Backend.functional_gradient)", "https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive ({self.name})\" y = self.to_float(y) x0", "= f\"{(self.memory / 1024 ** 2)} MB\" if self.memory >", "-> DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) -> list:", "range(batch_size): def b_thread(b=b): recent_b_losses = [] def b_fun(x: numpy.ndarray): function_evaluations[b]", "str): \"\"\" Depending on `mode`, performs scatter_update or scatter_add. 
Args:", "point values to this precision, even if the input had", "None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")) x", "if trj else None threads = [] for b in", "numpy.ndarray: \"\"\" Returns a NumPy representation of the given tensor.", "NotImplementedError(self) def einsum(self, equation, *tensors): raise NotImplementedError(self) def while_loop(self, loop:", "The first grid point of dimension i lies at position", "ComputeDevice: return self._default_device def set_default_device(self, device: ComputeDevice or str): if", "fp_precision=self.precision) def auto_cast(self, *tensors) -> list: \"\"\" Determins the appropriate", "b = self.auto_cast(a, b) return a & b def or_(self,", "not_finished_1 dy = self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy =", "64, None) \"\"\" _PRECISION[0] = floating_point_bits def get_precision() -> int:", "math operations has the same precision as its inputs. Args:", "return f\"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}\" class", "a, b = self.auto_cast(a, b) return a + b def", "None or dim1 == 1: return dim2 if dim2 is", "recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: # first evaluation final_losses[b] =", "provide a gradient for the operation. Args: method: Which algorithm", "if not backend.is_tensor(value, only_native=False): return False return True def _is_specific(backend,", "scatter values are inserted at indices. Tensor of shape (batch_size,", "matmul(self, A, b): raise NotImplementedError(self) def einsum(self, equation, *tensors): raise", "mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used for out-of-bounds", "times given by multiples. 
If `multiples` has more dimensions than", "valid for all instances * tuple/list of sparse/dense matrices for", "\"\"\" Creates a function based on `f` that uses a", "for varying matrices along batch, must have the same nonzero", "(batch, parameters) rtol: Relative tolerance of size (batch,) atol: Absolute", "Function with similar signature and return values as `f`. However,", "int = memory \"\"\" Maximum memory of the device that", "== 'CG': return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)", "NotImplementedError(self) def ones(self, shape, dtype: DType = None): raise NotImplementedError(self)", "b def xor(self, a, b): a, b = self.auto_cast(a, b)", "thus, will be converted by this method. Args: x: tensor-like,", "\"\"\" return get_precision() @property def float_type(self) -> DType: return DType(float,", "t, last_point in zip(trajectories, last_points)] trajectory = [] for states", "is None or dim2 == 1: return dim1 assert dim1", "number of values padded to the edges of each axis", "Creates a function based on `f` that uses a custom", "wrt: tuple or list, get_output: bool): raise NotImplementedError(self) def custom_gradient(self,", "def floor(self, x): raise NotImplementedError(self) def max(self, x, axis=None, keepdims=False):", "only_native=False): \"\"\" An object is considered a native tensor by", "can be represented as a NumPy array. Args: tensor: backend-compatible", "self.cast(x, DType(complex, max(64, min(self.precision * 2, 128)))) def batched_gather_nd(self, values,", "Interpolates a regular grid at the specified coordinates. Args: grid:", "= memory \"\"\" Maximum memory of the device that can", "# (batch, parameters) batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f, [0],", "prefer_default=False) if backend.is_tensor(tensor, True) or backend is current_backend: return tensor", "tensor with values as specified by `mode` and `constant_values`. 
If", "\"\"\" raise NotImplementedError(self) def matmul(self, A, b): raise NotImplementedError(self) def", "ifft(self, k): \"\"\" Computes the n-dimensional inverse FFT along all", "\"\"\" Type of device such as `'CPU'`, `'GPU'` or `'TPU'`.", "= f\"Φ-Flow CG-adaptive ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0),", "xs: tuple or list, persistent=False): raise NotImplementedError(self) def stop_gradient(self, value):", "keepdims=False) -> tuple: if axis < 0: axis += len(tensor.shape)", "return numerator / denominator def pow(self, base, exp): base, exp", "preferred by `choose_backend()`. The default backend can be set globally", "axis=None, keepdims=False): raise NotImplementedError(self) def range(self, start, limit=None, delta=1, dtype:", "Args: method: Which algorithm to use. One of `('auto', 'CG',", "supported. \"\"\" feature = feature if isinstance(feature, str) else feature.__name__", "{self.name}\" iterations = [0] * batch_size function_evaluations = [0] *", "(batch_size or 1, update_count or 1, channels or 1) mode:", "NotImplementedError(self) def cos(self, x): raise NotImplementedError(self) def tan(self, x): raise", "= self.sum(residual ** 2, -1, keepdims=True) dx = residual -", "`tuple` with entries equal to `values` in shape and data", "self.combine_types(*dtypes) if result_type.kind in (int, float, complex, bool): tensors =", "def boolean_mask(self, x, mask, axis=0): \"\"\" Args: x: tensor with", "is considered a native tensor by a backend if no", "of this backend that must be implemented if the feature", "not support backpropagation. Args: loop: Loop function, must return a", "Converts a tensor-like object to the native tensor representation of", "of shape (batch_size or 1, out_channel, in_channel, spatial...) zero_padding: If", "matrices. indices: Tensor holding the corresponding values \"\"\" raise NotImplementedError(self)", "e.g. because it represents a node in a graph. 
Use", "x0, atol, max_iter, trj: bool): from scipy.optimize import OptimizeResult, minimize", "differentiation chain. Args: tensor: Native tensor belonging to any registered", "f\"{self.processor_count} processors\" if self.processor_count > 0 else \"processors: n/a\" descr", "of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. Returns: sampled values", "of this backend, it is returned without modification. If x", "in (int, float, complex, bool): tensors = [self.cast(t, result_type) for", "(tuple, list)): for lin_i in lin: lin_shape = self.staticshape(lin_i) assert", ">= y def add(self, a, b): a, b = self.auto_cast(a,", "Args: values: tensor of shape (batch, spatial..., channel) indices: int", "called by the default implementations of basic operators. Backends can", "NotImplementedError() @property def precision(self) -> int: \"\"\" Short for math.backend.get_precision()", "= res.success diverged[b] = res.status not in (0, 1) #", "gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`.", "assuming it can handle handle the values, see `default_backend()`. raise_error:", "backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor)", "loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if trajectories is", "tensor): raise NotImplementedError(self) def ones(self, shape, dtype: DType = None):", "int tensor of shape (batch, any..., multi_index) where the size", "converged[b], diverged[b], \"\") for b in range(batch_size)] trajectories = [t[:-1]", "max_iter, trj) elif method == 'CG': return self.conjugate_gradient(lin, y, x0,", "iterations = self.copy(iterations) finished = converged | diverged | (iterations", "point precision in bits. The precision can be set globally", "set default precision. See Also: `Backend.precision()`. 
If `x` is mutable", "x0, rtol, atol, max_iter, trj) elif method == 'CG-adaptive': return", "_is_applicable(backend, values): for value in values: if not backend.is_tensor(value, only_native=False):", "`is_available(tensor)` to check if the value can be represented as", "def sqrt(self, x): raise NotImplementedError(self) def exp(self, x): raise NotImplementedError(self)", "def auto_cast(self, *tensors) -> list: \"\"\" Determins the appropriate values", "axis=0): raise NotImplementedError(self) def concat(self, values, axis): raise NotImplementedError(self) def", "value, multiples): \"\"\" Repeats the tensor along each axis the", "tuple/list matching the dimensions (pair for matrix) values: param shape:", "-> bool: \"\"\" Tests if the value of the tensor", "exp) return base ** exp def mod(self, dividend, divisor): dividend,", "holding the corresponding values \"\"\" raise NotImplementedError(self) def minimize(self, method:", "y = self.auto_cast(x, y) return x >= y def add(self,", "(Default value = 0) mode: str: (Default value = 'constant')", "are not native and thus, will be converted by this", "guess of size (batch, parameters) rtol: Relative tolerance of size", "Absolute tolerance of size (batch,) max_iter: Maximum number of iterations", "Also: `Backend.precision()`. If `x` is mutable and of the correct", "tuple: raise NotImplementedError(self) def record_gradients(self, xs: tuple or list, persistent=False):", "return NotImplemented def jit_compile_grad(self, f, wrt: tuple or list, get_output:", "mode='constant' (Default value = 0) mode: str: (Default value =", "_, _, residual, iterations, function_evaluations, converged, diverged =\\ self.while_loop(loop, (continue_,", "the returned function does not support keyword arguments. 
\"\"\" return", "def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool)", "consider when determining the common data type Returns: tensors cast", "in-place subtraction affects convergence residual_squared = self.sum(residual ** 2, -1,", "& (iterations >= 8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,))", "= processor_count \"\"\" Number of CPU cores or GPU multiprocessors.", "= False) Returns: bool: whether `x` is considered a tensor", "def reshape(self, value, shape): raise NotImplementedError(self) def flip(self, value, axes:", "NotImplementedError(self) def clip(self, x, minimum, maximum): raise NotImplementedError(self) def sqrt(self,", "surrounding `with backend:` block. If called outside a backend context,", "zero-copy conversion using the DLPack library. Else, intermediately converts `tensor`", "self.list_devices(device) assert len(devices) >= 1, f\"{self.name}: Cannot select '{device} because", "of the values stored in the tensor \"\"\" raise NotImplementedError()", "into which scatter values are inserted at indices. Tensor of", "-> 'Backend': from phi.math.backend import BACKENDS for backend in BACKENDS:", "values: Values to scatter at indices. Tensor of shape (batch_size", "Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices`", "of dimensions mask: 1D mask tensor axis: Axis index >=", "Backend: \"\"\" The default backend is preferred by `choose_backend()`. The", "residual - step_size * dy # in-place subtraction affects convergence", "iterations, function_evaluations, converged, diverged, \"\")) x = self.copy(x) iterations =", "batch_size xs = [None] * batch_size final_losses = [None] *", "operations has the same precision as its inputs. 
Args: floating_point_bits:", "Use `is_available(tensor)` to check if the value can be represented", "necessary but ensures batch-independence x += step_size * dx if", "backends represent the same device with different objects. \"\"\" def", "False) Returns: bool: whether `x` is considered a tensor by", "Python number, tensor convert_external: if False and `x` is a", "number that is understood by this backend, this method returns", "x, y): \"\"\" Computes x/y but returns 0 if y=0.", "in BACKENDS if _is_applicable(backend, values)] if len(backends) == 0: raise", "either equal for both or one for either. Args: values:", "Returns: Function with similar signature and return values as `f`.", "locations. * linear function A(x), must be called on all", "loop variables. Returns: Loop variables upon loop completion. \"\"\" raise", "3D (rank=5). Higher dimensions may not be supported. Args: value:", "= [0] * batch_size function_evaluations = [0] * batch_size xs", "1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if trajectories", ">= len(tensor.shape) or axis < 0: raise ValueError(\"Illegal axis value\")", "[0] = global precision in bits, [1:] from 'with' blocks", "set as default \"\"\" assert isinstance(backend, Backend) _DEFAULT[0] = backend", "`set_global_precision()` or locally using `with precision(p):`. Any Backend method may", "None): raise NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self) def ones(self,", "values with precision equal to the currently set default precision.", "True, raises a `NoBackendFound` error, else returns `None`. 
Returns: the", "backend: 'Backend', name: str, device_type: str, memory: int, processor_count: int,", "backends[0] class NoBackendFound(Exception): \"\"\" Thrown by `choose_backend` if no backend", "[0] = global default, [1:] from 'with' blocks _PRECISION =", "NotImplementedError() def is_tensor(self, x, only_native=False): \"\"\" An object is considered", "Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if all(finished): all_finished =", "a valid NumPy representation of the value. Tensors are typically", "x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:", "state in states] function_evaluations = [state.function_evaluations for state in states]", "*args): # L-BFGS-B only passes x but the documentation says", "dx = residual = y - self.linear(lin, x) dy =", "len(tensor.shape) or axis < 0: raise ValueError(\"Illegal axis value\") result", "def __init__(self, msg): Exception.__init__(self, msg) def default_backend() -> Backend: \"\"\"", "diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations", "given values. \"\"\" def __init__(self, msg): Exception.__init__(self, msg) def default_backend()", "of b. \"\"\" raise NotImplementedError(self) def matmul(self, A, b): raise", "the internal device representation. \"\"\" self.backend: 'Backend' = backend \"\"\"", "divide_no_nan(self, x, y): \"\"\" Computes x/y but returns 0 if", "multi-indices as tensor of shape (nnz, vector) \"\"\" raise NotImplementedError(self)", "res.x converged[b] = res.success diverged[b] = res.status not in (0,", "selected precision containing random values sampled from a normal distribution", "target floating point precision in bits. The precision can be", "if the input had a different precision. 
Returns: 16 for", "combined_dim(dim1, dim2, type_str: str = 'batch'): if dim1 is None", "NotImplementedError(self) def minimum(self, a, b): raise NotImplementedError(self) def clip(self, x,", "linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj:", "this type is available.\" device = devices[0] self._default_device = device", "def nonzero(self, values): \"\"\" Args: values: Tensor with only spatial", "rtol, atol, max_iter, trj) else: raise NotImplementedError(f\"Method '{method}' not supported", "iterations, function_evaluations, converged, diverged, \"\") def conjugate_gradient_adaptive(self, lin, y, x0,", "= self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None:", "raise NotImplementedError(self) def log(self, x): \"\"\" Natural logarithm \"\"\" raise", "return values as `f`. However, the returned function does not", "henceforth will be of the corresponding data type, float16, float32", "Different backends represent the same device with different objects. \"\"\"", "[`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices)", "Returns: Gathered values as tensor of shape (batch, any..., channel)", "Solving Linear Systems\" by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method", "- step_size * dy # in-place subtraction affects convergence residual_squared", "it to the list. 
\"\"\" _DEFAULT = [] # [0]", "Returns: tile tensor \"\"\" raise NotImplementedError(self) def sparse_tensor(self, indices, values,", "value: tensor pad_width: 2D tensor specifying the number of values", "get_precision() @property def float_type(self) -> DType: return DType(float, self.precision) @property", "30: descr = descr[:28] + \"...\" return f\"'{self.name}' ({self.device_type}) |", "`x`. To convert float tensors to the backend precision but", "(Default value = True) Returns: tensor representation of `x` \"\"\"", "at `indices` updated by `values`. \"\"\" raise NotImplementedError(self) def any(self,", "mutable and of the correct floating type, returns a copy", "in lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape) == 2 return", "or locally using `with precision(p):`. Any Backend method may convert", "\"\"\" Tests if the value of the tensor is known", "Else, intermediately converts `tensor` to a NumPy array. *Warning*: This", "numerator, denominator): numerator, denominator = self.auto_cast(numerator, denominator) return numerator /", "# else: residual = residual - step_size * dy #", "return a & b def or_(self, a, b): a, b", "NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self) def staticshape(self, tensor): raise", "Returns the coordinates and values of a tensor. Args: tensor:", "= minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert", "DType: return DType(float, self.precision) @property def as_registered(self) -> 'Backend': from", "DType = None): raise NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self)", "self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but ensures", "\"\"\" # Based on the variant described in \"Methods of", "vector): if callable(lin): return lin(vector) elif isinstance(lin, (tuple, list)): for", "if the input had a different precision. 
If `floating_point_bits` is", "NotImplementedError() def copy(self, tensor, only_mutable=False): raise NotImplementedError() def call(self, f:", "values = loop(*values) return values ``` This operation does not", "Computes the n-dimensional FFT along all but the first and", "= device def seed(self, seed: int): raise NotImplementedError() def is_tensor(self,", "of `x` \"\"\" raise NotImplementedError() def is_available(self, tensor) -> bool:", "inverse FFT along all but the first and last dimensions.", "* dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1,", "set by the inner-most surrounding `with backend:` block. If called", "= device_type \"\"\" Type of device such as `'CPU'`, `'GPU'`", "max_iter, trj: bool): from scipy.optimize import OptimizeResult, minimize from threading", "`value`, these dimensions are added to `value` as outer dimensions.", "** 2)} MB\" if self.memory > 0 else \"memory: n/a\"", "values of loop variables. Returns: Loop variables upon loop completion.", "= True while not all_finished: f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread)", "dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really", "self.sum(residual ** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged", "-> SolveResult or List[SolveResult]: \"\"\" Conjugate gradient algorithm with adaptive", "numpy from ._dtype import DType, combine_types SolveResult = namedtuple('SolveResult', [", "the dimensions (pair for matrix) values: param shape: shape: Returns:", "accept it as a tensor argument. Args: x: object to", "a matrix but got shape {lin_shape}\" return self.matmul(lin, vector) def", "device such as driver version. 
\"\"\" self.ref = ref \"\"\"", "diverged = [state.diverged for state in states] trajectory.append(SolveResult(method_description, x, residual,", "_, x, _, _, residual, iterations, function_evaluations, converged, diverged =\\", "x); function_evaluations += 1 else: residual = residual - step_size", "the specified coordinates. Args: grid: Tensor spatial_dims: Dimension indices that", "= self.linear(lin, dx); function_evaluations += continue_1 diverged = self.any(residual_squared /", "to coordinate vectors coordinates: Tensor of floating grid indices. The", "result as tensor of shape (batch_size, out_channel, spatial...) \"\"\" raise", "on \"An Introduction to the Conjugate Gradient Method Without the", "point precision for the local context. Usage: `with precision(p):` This", "* linear function A(x), must be called on all instances", "CPU cores or GPU multiprocessors. -1 for n/a. \"\"\" self.description:", "custom gradient for backprop. Args: f: Forward function. gradient: Function", "as specified by `mode` and `constant_values`. If the mode is", "range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if d == axis else", "get_output: bool): raise NotImplementedError(self) def custom_gradient(self, f: Callable, gradient: Callable)", "list) and all methods of the backend accept it as", "vector) \"\"\" raise NotImplementedError(self) def mean(self, value, axis=None, keepdims=False): raise", "float tensor \"\"\" return self.cast(x, self.float_type) def to_int32(self, x): return", "using `with backend:`. See `default_backend()`, `choose_backend()`. 
Args: backend: `Backend` to", "\"\"\" Maximum memory of the device that can be allocated", "None if dim1 is None or dim1 == 1: return", "a backend if no internal conversion is required by backend", "_DEFAULT[-1] if len(_DEFAULT) > 1 else None def set_global_default_backend(backend: Backend):", "self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared =", "'{device} because no device of this type is available.\" device", "device = devices[0] self._default_device = device def seed(self, seed: int):", "clashes like int32 vs int64. (Default value = True) Returns:", "trj) elif method == 'CG': return self.conjugate_gradient(lin, y, x0, rtol,", "has the same shape as `value`. Returns: Convolution result as", "no backend can handle the given values. \"\"\" def __init__(self,", "= self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol **", "precision containing random values in the range [0, 1) \"\"\"", "@property def name(self) -> str: return self._name def supports(self, feature:", "x, y = self.auto_cast(x, y) return x > y def", "from threading import Barrier from typing import List, Callable import", "assert isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b] = res.x converged[b]", "self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 = self.sum(residual ** 2,", "def greater_or_equal(self, x, y): x, y = self.auto_cast(x, y) return", "given values. If True, raises a `NoBackendFound` error, else returns", "Values of `x` as float tensor \"\"\" return self.cast(x, self.float_type)", "= self.copy(iterations) finished = converged | diverged | (iterations >=", "def sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def prod(self, value,", "in range(batch_size)] trajectories = [t[:-1] + [last_point] * (max_trajectory_length -", "uses the current default backend, see `default_backend()`. 
Returns: Tensor belonging", "by most math functions operating on `Tensor` objects to delegate", "< 0: axis += len(tensor.shape) if axis >= len(tensor.shape) or", "a native tensor by a backend if no internal conversion", "used by default \"\"\" self._name = name self._default_device = default_device", "and values of a tensor. Args: tensor: Sparse tensor Returns:", "registered backends. If `floating_point_bits` is an integer, all floating point", "and `x` is a Python number that is understood by", "= self.auto_cast(numerator, denominator) return numerator / denominator def pow(self, base,", "a tensor by this backend \"\"\" raise NotImplementedError() def as_tensor(self,", "res.success diverged[b] = res.status not in (0, 1) # 0=success", "graph. Use `is_available(tensor)` to check if the value can be", "2, -1, keepdims=True) dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) *", "Gradients for Solving Linear Systems\" by <NAME> and <NAME> #", "precision(p):` This overrides the global setting, see `set_global_precision()`. 
Args: floating_point_bits:", "which must be either equal for both or one for", "from ._dtype import DType, combine_types SolveResult = namedtuple('SolveResult', [ 'method',", "self.description: str = description \"\"\" Further information about the device", "NotImplementedError(self) def reshape(self, value, shape): raise NotImplementedError(self) def flip(self, value,", "return self.reshape(x, (-1,)) def std(self, x, axis=None, keepdims=False): raise NotImplementedError(self)", "BACKENDS = [] \"\"\" Global list of all registered backends.", "nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice utility functions", "SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages) def linear_solve(self,", "Human-readable string default_device: `ComputeDevice` being used by default \"\"\" self._name", "trj: bool): from scipy.optimize import OptimizeResult, minimize from threading import", "raise NotImplementedError(self) def log2(self, x): raise NotImplementedError(self) def log10(self, x):", "backpropagation. Args: loop: Loop function, must return a `tuple` with", "is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False,", "as_registered(self) -> 'Backend': from phi.math.backend import BACKENDS for backend in", "0: residual = y - self.linear(lin, x); function_evaluations += 1", "tensor, batches): if isinstance(batches, int): batches = [batches] return tensor[batches,", "a NumPy array. Args: tensor: backend-compatible tensor Returns: NumPy representation", "a tensor. Args: tensor: Sparse tensor Returns: coordinates: `tuple` of", "dx, dy, residual, iterations, function_evaluations, converged, diverged _, _, x,", "NotImplementedError(self) def sign(self, x): raise NotImplementedError(self) def round(self, x): raise", "implementations of basic operators. 
Backends can override this method to", "with values at `indices` updated by `values`. \"\"\" raise NotImplementedError(self)", "+ 1) finished = [False] * batch_size all_finished = False", "name: Human-readable string default_device: `ComputeDevice` being used by default \"\"\"", "= self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx *", "the tensor rank, the convolution is either 1D (rank=3), 2D", "= self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy)", "b = self.auto_cast(a, b) return a - b def mul(self,", "One of * sparse/dense matrix valid for all instances *", "self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) else: raise NotImplementedError(f\"Method", "default precision. See Also: `Backend.precision()`. If `x` is mutable and", "may convert floating point values to this precision, even if", "tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates a regular grid at the", "tensor): raise NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError() def copy(self,", "def tan(self, x): raise NotImplementedError(self) def log(self, x): \"\"\" Natural", "batch-independence x += step_size * dx # if it_counter %", "method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter,", "this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list`", "a gradient for the operation. Args: method: Which algorithm to", "the given tensor. If `tensor` is already a NumPy array,", "are typically called `'CPU'`. 
\"\"\" self.device_type: str = device_type \"\"\"", "str, memory: int, processor_count: int, description: str, ref=None): self.name: str", "the tensor along each axis the number of times given", "= res.status not in (0, 1) # 0=success messages[b] =", "'with' blocks def choose_backend(*values, prefer_default=False) -> Backend: \"\"\" Selects a", "NotImplementedError(self) def range(self, start, limit=None, delta=1, dtype: DType = DType(int,", "axes: tuple or list): slices = tuple(slice(None, None, -1 if", "Tensor belonging to `backend`. \"\"\" backend = backend or default_backend()", "Args: x: y: Returns: \"\"\" raise NotImplementedError(self) def where(self, condition,", "of (16, 32, 64, None) \"\"\" _PRECISION[0] = floating_point_bits def", "f\"{self.name}: Cannot select '{device} because no device of this type", "import BACKENDS for backend in BACKENDS: if self.name in backend.name:", "50 == 0: residual = y - self.linear(lin, x); function_evaluations", "low-level operations to a compute library or emulate them. The", "spatial dimensions Returns: non-zero multi-indices as tensor of shape (nnz,", "x): raise NotImplementedError(self) def dtype(self, array) -> DType: raise NotImplementedError(self)", "or 1, update_count, index_vector) values: Values to scatter at indices.", "Float tensor of selected precision containing random values sampled from", "`value` with zeros so that the result has the same", "operation. Args: method: Which algorithm to use. 
One of `('auto',", "== 2 return self.stack([self.matmul(m, v) for m, v in zip(lin,", "\"\"\" raise NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self)", "List, Callable import numpy from ._dtype import DType, combine_types SolveResult", "def default_backend() -> Backend: \"\"\" The default backend is preferred", "Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only devices of this", "_, residual, iterations, function_evaluations, converged, diverged =\\ self.while_loop(loop, (continue_, 0,", "scatter(self, base_grid, indices, values, mode: str): \"\"\" Depending on `mode`,", "or List[SolveResult]: \"\"\" Standard conjugate gradient algorithm. Signature matches to", "f_b_losses[b], 0, 1, False, False, \"\")) return f_b_losses_np[b], f_grad_np[b] def", "in zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert len(lin_shape) ==", "return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages) def", "returned function does not support keyword arguments. 
\"\"\" return NotImplemented", "subtraction affects convergence residual_squared = self.sum(residual ** 2, -1, keepdims=True)", "x > y def greater_or_equal(self, x, y): x, y =", "if callable(lin): return lin(vector) elif isinstance(lin, (tuple, list)): for lin_i", "mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def range(self, start, limit=None,", "_is_specific(backend, values): return backend return backends[0] class NoBackendFound(Exception): \"\"\" Thrown", "tensor, axes): raise NotImplementedError() def random_uniform(self, shape): \"\"\" Float tensor", "function_evaluations, converged, diverged, \"\") def conjugate_gradient_adaptive(self, lin, y, x0, rtol,", "messages = [\"\"] * batch_size f_inputs = [None] * batch_size", "points if mode='constant' (Default value = 0) mode: str: (Default", "values of a tensor. Args: tensor: Sparse tensor Returns: coordinates:", "f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait()", "backend to handle the given values. This function is used", "behavior of this function if no backend can handle the", "to the edges of each axis in the form [[axis", "self.staticshape(lin) assert len(lin_shape) == 2, f\"A must be a matrix", "= residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged = self.any(residual_squared", "all currently available devices. 
\"\"\" raise NotImplementedError() def get_default_device(self) ->", "\"\"\" Pad a tensor with values as specified by `mode`", "values) and (prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # ---", "0: axis += len(tensor.shape) if axis >= len(tensor.shape) or axis", "maximum): raise NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self) def exp(self,", "step_size * dy # in-place subtraction affects convergence residual_squared =", "x): raise NotImplementedError(self) def round(self, x): raise NotImplementedError(self) def ceil(self,", "unbound Backend method, e.g. `Backend.sparse_tensor` Returns: Whether the feature is", "'with' blocks _PRECISION = [32] # [0] = global precision", "devices. \"\"\" raise NotImplementedError() def get_default_device(self) -> ComputeDevice: return self._default_device", "values stored in the tensor \"\"\" raise NotImplementedError() def to_dlpack(self,", "raise NotImplementedError(self) def sign(self, x): raise NotImplementedError(self) def round(self, x):", "\"\"\" Natural logarithm \"\"\" raise NotImplementedError(self) def log2(self, x): raise", "· x = y. This method need not provide a", "Returns: non-zero multi-indices as tensor of shape (nnz, vector) \"\"\"", "continue_ = ~converged & ~diverged & (iterations < max_iter) return", "str or None = None) -> List[ComputeDevice]: \"\"\" Fetches information", "can handle handle the values, see `default_backend()`. raise_error: Determines the", "converts `tensor` to a NumPy array. *Warning*: This operation breaks", "NotImplementedError(self) def abs(self, x): raise NotImplementedError(self) def sign(self, x): raise", "| {descr}\" class Backend: def __init__(self, name: str, default_device: ComputeDevice):", "str or Callable) -> bool: \"\"\" Tests if this backend", "to a compute library or emulate them. 
The methods of", "\"\")] if trj else None finished = converged | diverged", "residual_squared_old = residual_squared residual_squared = self.sum(residual ** 2, -1, keepdims=True)", "function is used by most math functions operating on `Tensor`", "tensor[tuple([slice_idx if d == axis else slice(None) for d in", "tensor, axis=0, keepdims=False) -> tuple: if axis < 0: axis", "it_counter += 1; iterations += not_finished_1 dy = self.linear(lin, dx);", "number of times given by multiples. If `multiples` has more", "current_backend: return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule", "def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property", "\"\")) x = self.copy(x) iterations = self.copy(iterations) finished = converged", "information about all available compute devices this backend can use.", "already a NumPy array, it is returned without modification. This", "') if len(descr) > 30: descr = descr[:28] + \"...\"", "1D mask tensor axis: Axis index >= 0 \"\"\" raise", "* batch_size f_inputs = [None] * batch_size f_b_losses = None", "elif method == 'CG': return self.conjugate_gradient(lin, y, x0, rtol, atol,", "= y - self.linear(lin, x); function_evaluations += 1 # else:", "NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self) def cast(self, x, dtype:", "'Backend' = backend \"\"\" Backend that this device belongs to.", "no device of this type is available.\" device = devices[0]", "others that are also supported as tensors (Default value =", "array): return self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if isinstance(batches, int):", "b): a, b = self.auto_cast(a, b) return a + b", "be set globally using `set_global_precision()` or locally using `with precision(p):`.", "self.sum(y ** 2, -1), atol ** 2) x = x0", "@property def complex_type(self) -> DType: return DType(complex, max(64, 
self.precision)) def", "Backend: \"\"\" Selects a suitable backend to handle the given", "the backend cannot handle Python numbers. *Note:* There may be", "f\"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}\" class Backend:", "in bits. The precision can be set globally using `set_global_precision()`", "no internal conversion is required by backend methods. An object", "def meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self, start, stop, number):", "start, limit=None, delta=1, dtype: DType = DType(int, 32)): raise NotImplementedError(self)", "*DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack library.", "str = device_type \"\"\" Type of device such as `'CPU'`,", "while not all_finished: f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start()", "len(devices) >= 1, f\"{self.name}: Cannot select '{device} because no device", "batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True) method_description =", "# res.nit, res.nfev xs[b] = res.x converged[b] = res.success diverged[b]", "floating_point_bits: 16 for half, 32 for single, 64 for double", "= self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished):", "to compute the gradient of `f`. Returns: Function with similar", "or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of all currently available", "for matrix) values: param shape: shape: Returns: \"\"\" raise NotImplementedError(self)", "precision for the local context. 
Usage: `with precision(p):` This overrides", "self.linear(lin, x) it_counter = 0 iterations = self.zeros([batch_size], DType(int, 32))", "function A(x), must be called on all instances in parallel", "[self.dtype(t) for t in tensors] result_type = self.combine_types(*dtypes) if result_type.kind", "described in \"Methods of Conjugate Gradients for Solving Linear Systems\"", "step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy) step_size *=", "zeros so that the result has the same shape as", "is the batch dimension which must be either equal for", "f_inputs = [None] * batch_size f_b_losses = None f_b_losses_np =", "raise NotImplementedError(self) def sin(self, x): raise NotImplementedError(self) def cos(self, x):", "__init__(self, backend: 'Backend', name: str, device_type: str, memory: int, processor_count:", "import OptimizeResult, minimize from threading import Thread assert self.supports(Backend.functional_gradient) assert", "`None`, uses the current default backend, see `default_backend()`. Returns: Tensor", "register it by adding it to `BACKENDS`. Args: name: Human-readable", "[\"\"] * batch_size f_inputs = [None] * batch_size f_b_losses =", "floating point precision of DYNAMIC_BACKEND which affects all registered backends.", "\"\"\" Computes x/y but returns 0 if y=0. 
Args: x:", "self.reshape(x, (-1,)) def std(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def", "* residual, axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) #", "'{feature}'\") backend_fun = getattr(Backend, feature) impl_fun = getattr(self.__class__, feature) return", "is_available(self, tensor) -> bool: \"\"\" Tests if the value of", "* self.sum(y ** 2, -1), atol ** 2) x =", "tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None)", "self._default_device = default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val,", "\"\") for b in range(batch_size)] trajectories = [t[:-1] + [last_point]", "`f`. However, the returned function does not support keyword arguments.", "a Python number that is understood by this backend, this", "_DEFAULT[0] = backend def set_global_precision(floating_point_bits: int): \"\"\" Sets the floating", "a different precision. If `floating_point_bits` is None, new tensors will", "is supported. Possible features: * `sparse_tensor` * `gradients Args: feature:", "devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`.", "values as tensor of shape (batch, any..., channel) \"\"\" raise", "as tensors (Default value = False) Returns: bool: whether `x`", "valid NumPy representation of the value. Tensors are typically available", "feature is supported. Possible features: * `sparse_tensor` * `gradients Args:", "+ 1) if d == axis else slice(None) for d", "seed(self, seed: int): raise NotImplementedError() def is_tensor(self, x, only_native=False): \"\"\"", "isinstance(backend, Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits: int): \"\"\" Sets", "def call(self, f: Callable, *args, name=None): \"\"\" Calls `f(*args)` and", "keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq,", "n/a. 
\"\"\" self.processor_count: int = processor_count \"\"\" Number of CPU", "in states] diverged = [state.diverged for state in states] trajectory.append(SolveResult(method_description,", "of `Backend` form a comprehensive list of available operations. To", "this method returns the number as-is. This can help prevent", "* batch_size final_losses = [None] * batch_size converged = [False]", "device. CPUs are typically called `'CPU'`. \"\"\" self.device_type: str =", "for double \"\"\" return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int): \"\"\"", "Callable, *args, name=None): \"\"\" Calls `f(*args)` and returns the result.", "n/a\" pro = f\"{self.processor_count} processors\" if self.processor_count > 0 else", "value can be represented as a NumPy array. Args: tensor:", "f_input_available.wait() if all(finished): all_finished = True f_output_available.wait() break _, f_b_losses,", "scatter_add. Args: base_grid: Tensor into which scatter values are inserted", "internal device representation. \"\"\" self.backend: 'Backend' = backend \"\"\" Backend", "b) return a - b def mul(self, a, b): a,", "floating point precision in bits. The precision can be set", "Default Backend has priority --- if _is_applicable(_DEFAULT[-1], values) and (prefer_default", ">= 8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory", "or 3D (rank=5). Higher dimensions may not be supported. Args:", "jit_compile_grad(self, f, wrt: tuple or list, get_output: bool): raise NotImplementedError()", "\"\"\" return NotImplemented def jit_compile_grad(self, f, wrt: tuple or list,", "def where(self, condition, x=None, y=None): raise NotImplementedError(self) def nonzero(self, values):", "def minimum(self, a, b): raise NotImplementedError(self) def clip(self, x, minimum,", "or dim1 == 1: return dim2 if dim2 is None", "b. 
\"\"\" raise NotImplementedError(self) def matmul(self, A, b): raise NotImplementedError(self)", "return backends[0] class NoBackendFound(Exception): \"\"\" Thrown by `choose_backend` if no", "in-place subtraction affects convergence residual_squared_old = residual_squared residual_squared = self.sum(residual", "f_output_available.wait() for b_thread in threads: b_thread.join() # make sure threads", "raise NotImplementedError(self) def prod(self, value, axis=None): raise NotImplementedError(self) def divide_no_nan(self,", "of shape (batch, any..., multi_index) where the size of multi_index", "a NumPy representation of the given tensor. If `tensor` is", "floating type, returns a copy of `x`. To convert float", "The methods of `Backend` form a comprehensive list of available", "axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method, x, residual, iterations,", "or backend is current_backend: return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack)", "loss, iterations[b], function_evaluations[b], False, False, \"\")) res = minimize(fun=b_fun, x0=x0[b],", "the given backend as default. This setting can be overridden", "are typically available when the backend operates in eager mode.", "lower, axis 0 upper], ...] including batch and component axes.", "converged, diverged, \"\") def linear(self, lin, vector): if callable(lin): return", "greater_than(self, x, y): x, y = self.auto_cast(x, y) return x", "typing import List, Callable import numpy from ._dtype import DType,", "raise NotImplementedError() def numpy(self, tensor) -> numpy.ndarray: \"\"\" Returns a", "gradient: Callable) -> Callable: \"\"\" Creates a function based on", "tuple or list, persistent=False): raise NotImplementedError(self) def stop_gradient(self, value): raise", "NotImplementedError(self) def ifft(self, k): \"\"\" Computes the n-dimensional inverse FFT", "a `Backend` by adding it to the list. 
\"\"\" _DEFAULT", "float_type(self) -> DType: return DType(float, self.precision) @property def as_registered(self) ->", "function_evaluations += not_finished_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)", "tile(self, value, multiples): \"\"\" Repeats the tensor along each axis", "a + b def sub(self, a, b): a, b =", "corresponding data type, float16, float32 or float64. Operations may also", "bool: raise NotImplementedError() @property def precision(self) -> int: \"\"\" Short", "on `tensor`, returns `tensor`. If both backends support *DLPack* and", "gradient algorithm. Signature matches to `Backend.linear_solve()`. \"\"\" # Based on", "einsum(self, equation, *tensors): raise NotImplementedError(self) def while_loop(self, loop: Callable, values:", "or str): if isinstance(device, str): devices = self.list_devices(device) assert len(devices)", "number, tensor convert_external: if False and `x` is a Python", "of the given tensor. If `tensor` is already a NumPy", "in states]) residual = self.stack([state.residual for state in states]) iterations", "final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], \"\") for b in range(batch_size)]", "def get_precision() -> int: \"\"\" Gets the current target floating", "and thus, will be converted by this method. 
Args: x:", "exc_tb): _DEFAULT.pop(-1) @property def name(self) -> str: return self._name def", "grid, spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates a regular grid", "def exp(self, x): raise NotImplementedError(self) def conv(self, value, kernel, zero_padding=True):", "None) -> List[ComputeDevice]: \"\"\" Fetches information about all available compute", "to set as default \"\"\" assert isinstance(backend, Backend) _DEFAULT[0] =", "of dimension 3 or higher Returns: \"\"\" raise NotImplementedError(self) def", "tensor: Sparse tensor Returns: coordinates: `tuple` of tensor holding the", "False # Other low-level helper functions def combined_dim(dim1, dim2, type_str:", "The first dimension of `values` and `indices` is the batch", "for state in states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged,", "% 50 == 0: residual = y - self.linear(lin, x);", "backend found for types {[type(v).__name__ for v in values]}; registered", "NotImplementedError() def get_default_device(self) -> ComputeDevice: return self._default_device def set_default_device(self, device:", "spatial...) \"\"\" raise NotImplementedError(self) def expand_dims(self, a, axis=0, number=1): raise", "...] including batch and component axes. mode: constant', 'boundary', 'periodic',", "[None] * batch_size f_b_losses = None f_b_losses_np = None f_grad_np", "is required by backend methods. An object is considered a", "state in states] diverged = [state.diverged for state in states]", "globally using `set_global_default_backend()` and locally using `with backend:`. Returns: current", "states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)) return", "by backend methods. An object is considered a tensor (nativer", "for d in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if d", "a `List[SolveResult]`. 
Returns: result: `SolveResult` or `List[SolveResult]`, depending on `trj`.", "list, b, b_axes: tuple or list): \"\"\" Multiply-sum-reduce a_axes of", "dim2 is None or dim2 == 1: return dim1 assert", "zip(*trajectories): x = self.stack([self.to_float(state.x) for state in states]) residual =", "Method Without the Agonizing Pain\" by <NAME> # symbols: dx=d,", "device representation. \"\"\" self.backend: 'Backend' = backend \"\"\" Backend that", "x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult)", "`x` is mutable and of the correct floating type, returns", "make sure threads exit correctly if trj: max_trajectory_length = max([len(t)", "b): a, b = self.auto_cast(a, b) return a ^ b", "float32 or float64. Operations may also convert floating point values", "3 or higher Returns: \"\"\" raise NotImplementedError(self) def imag(self, x):", "not hasattr(Backend, feature): raise ValueError(f\"Not a valid feature: '{feature}'\") backend_fun", "axis=-1, keepdims=True) * dx, dx_dy) dy = self.linear(lin, dx); function_evaluations", "array, it is returned without modification. This method raises an", "states] function_evaluations = [state.function_evaluations for state in states] converged =", "~converged & ~diverged & (iterations < max_iter) return continue_, it_counter,", "update_count or 1, channels or 1) mode: One of ('update',", "def cast(self, x, dtype: DType): raise NotImplementedError(self) def to_float(self, x):", "\"\"\" return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int): \"\"\" Sets the", "the system of linear equations A · x = y.", "x += step_size * dx # if it_counter % 50", "`mode` and `constant_values`. 
If the mode is not supported, returns", "| diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) #", "linear interpolation \"\"\" return NotImplemented def variable(self, value): return NotImplemented", "backend, this method returns the number as-is. This can help", "different precision. Returns: 16 for half, 32 for single, 64", "y, x0, rtol, atol, max_iter, trj) elif method == 'CG-adaptive':", "Short for math.backend.get_precision() \"\"\" return get_precision() @property def float_type(self) ->", "x, residual, iterations, function_evaluations, converged, diverged, \"\") def conjugate_gradient_adaptive(self, lin,", "`set_global_default_backend()` and locally using `with backend:`. Returns: current default `Backend`", "-> list: \"\"\" Determins the appropriate values type resulting from", "res.nit, res.nfev xs[b] = res.x converged[b] = res.success diverged[b] =", "0 else \"processors: n/a\" descr = self.description.replace('\\n', ' ') if", "x, dtype: DType): raise NotImplementedError(self) def to_float(self, x): \"\"\" Converts", "= self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads: b_thread.join() # make", "an error if the value of the tensor is not", "a * b def div(self, numerator, denominator): numerator, denominator =", "# https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f\"Φ-Flow CG-adaptive ({self.name})\" y = self.to_float(y)", "compute library, subclass `Backend` and register it by adding it", "currently available devices. \"\"\" raise NotImplementedError() def get_default_device(self) -> ComputeDevice:", "x): \"\"\" Natural logarithm \"\"\" raise NotImplementedError(self) def log2(self, x):", "integer, all floating point tensors created henceforth will be of", "(batch, any..., multi_index) where the size of multi_index is values.rank", "(batch_size or 1, update_count, index_vector) values: Values to scatter at", "(batch, vector) or list of vectors. 
x0: Initial guess of", "or scatter_add. Args: base_grid: Tensor into which scatter values are", "to the native tensor representation of this backend. If x", "1, False, False, \"\")) return f_b_losses_np[b], f_grad_np[b] def callback(x, *args):", "be used to register internal calls with the profiler. Usage:", "function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_) it_counter += 1 iterations", "d in range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self, x, y):", "default implementations of basic operators. Backends can override this method", "self._name = name self._default_device = default_device def __enter__(self): _DEFAULT.append(self) def", "f\"Φ-Flow CG-adaptive ({self.name})\" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True)", "or 1, channels or 1) mode: One of ('update', 'add')", "backend_fun def prefers_channels_last(self) -> bool: raise NotImplementedError() @property def precision(self)", "``` This operation does not support backpropagation. Args: loop: Loop", "by the inner-most surrounding `with backend:` block. If called outside", "`'CPU'`. \"\"\" self.device_type: str = device_type \"\"\" Type of device", "None = None) -> List[ComputeDevice]: \"\"\" Fetches information about all", "f\"SciPy {method} with {self.name}\" iterations = [0] * batch_size function_evaluations", "dx); function_evaluations += continue_1 diverged = self.any(residual_squared / rsq0 >", "with linear interpolation \"\"\" return NotImplemented def variable(self, value): return", "NotImplementedError(self) def imag(self, x): raise NotImplementedError(self) def real(self, x): raise", "To support a compute library, subclass `Backend` and register it", "channels or 1) mode: One of ('update', 'add') Returns: Copy", "unnecessary casting. Args: *tensors: tensors to cast and to consider", "range(batch_size)] if trj else None threads = [] for b", "int32 vs int64. 
(Default value = True) Returns: tensor representation", "is values.rank - 2. Returns: Gathered values as tensor of", "a, b): a, b = self.auto_cast(a, b) return a //", "self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol **", "exp(self, x): raise NotImplementedError(self) def conv(self, value, kernel, zero_padding=True): \"\"\"", "diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,))", "'periodic', 'symmetric', 'reflect' constant_values: used for out-of-bounds points if mode='constant'", "of the corresponding data type, float16, float32 or float64. Operations", "\"\"\" ```python while any(values[0]): values = loop(*values) return values ```", "current default `Backend` \"\"\" return _DEFAULT[-1] def context_backend() -> Backend", "tensor by this backend \"\"\" raise NotImplementedError() def as_tensor(self, x,", "of each axis in the form [[axis 0 lower, axis", "-1 if i in axes else None) for i in", "finished[b] = True while not all_finished: f_input_available.wait() f_output_available.wait() b_thread =", "= self.combine_types(*dtypes) if result_type.kind in (int, float, complex, bool): tensors", "as `f`. 
However, the returned function does not support keyword", "in zip(*trajectories): x = self.stack([self.to_float(state.x) for state in states]) residual", "def is_tensor(self, x, only_native=False): \"\"\" An object is considered a", "but ensures batch-independence x += step_size * dx if it_counter", "dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method = f\"Φ-Flow CG", "if self.memory > 0 else \"memory: n/a\" pro = f\"{self.processor_count}", "threads: b_thread.join() # make sure threads exit correctly if trj:", "correctly if trj: max_trajectory_length = max([len(t) for t in trajectories])", "convert_external: if False and `x` is a Python number that", "break _, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np", "raise NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self, capsule):", "= [] \"\"\" Global list of all registered backends. Register", "with {self.name}\" iterations = [0] * batch_size function_evaluations = [0]", "__enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def", "axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1)", "get_default_device(self) -> ComputeDevice: return self._default_device def set_default_device(self, device: ComputeDevice or", "the same nonzero locations. * linear function A(x), must be", "operation does not support backpropagation. Args: loop: Loop function, must", "NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self, start, stop,", "def mul(self, a, b): a, b = self.auto_cast(a, b) return", "coordinates outside the grid. 
One of `('undefined', 'zeros', 'boundary', 'periodic',", "for math.backend.get_precision() \"\"\" return get_precision() @property def float_type(self) -> DType:", "can be selected to perform backend computations. \"\"\" def __init__(self,", "`gradient(*d_out)` to compute the gradient of `f`. Returns: Function with", "CPUs are typically called `'CPU'`. \"\"\" self.device_type: str = device_type", "x: object to check only_native: If True, only accepts true", "NotImplementedError(self) def round(self, x): raise NotImplementedError(self) def ceil(self, x): raise", "- 2. Returns: Gathered values as tensor of shape (batch,", "parameters) rtol: Relative tolerance of size (batch,) atol: Absolute tolerance", "# Based on the variant described in \"Methods of Conjugate", "values]}; registered backends are {BACKENDS}\") # --- Native tensors? ---", "* batch_size all_finished = False trajectories = [[] for _", "The output of math operations has the same precision as", "keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy) step_size", "impl_fun is not backend_fun def prefers_channels_last(self) -> bool: raise NotImplementedError()", "def real(self, x): raise NotImplementedError(self) def sin(self, x): raise NotImplementedError(self)", "grid. One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. Returns:", "float tensors to the backend precision but leave non-float tensors", "self.cast(x, DType(int, 64)) def to_complex(self, x): return self.cast(x, DType(complex, max(64,", "memory: int, processor_count: int, description: str, ref=None): self.name: str =", "NotImplemented \"\"\" raise NotImplementedError(self) def reshape(self, value, shape): raise NotImplementedError(self)", "-> DType: return DType(float, self.precision) @property def as_registered(self) -> 'Backend':", "\"\"\" (Optional) Reference to the internal device representation. 
\"\"\" self.backend:", "denominator): numerator, denominator = self.auto_cast(numerator, denominator) return numerator / denominator", "= converged | diverged | (iterations >= max_iter); not_finished_1 =", "found for types {[type(v).__name__ for v in values]}; registered backends", "* Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only devices of", "= self.copy(x) iterations = self.copy(iterations) finished = converged | diverged", "like int32 vs int64. (Default value = True) Returns: tensor", "self.auto_cast(x, y) return x >= y def add(self, a, b):", "backends = [backend for backend in BACKENDS if _is_applicable(backend, values)]", "backend as default. This setting can be overridden using `with", "feature.__name__ if not hasattr(Backend, feature): raise ValueError(f\"Not a valid feature:", "\"\"\" Returns a NumPy representation of the given tensor. If", "1 iterations += continue_1 dx_dy = self.sum(dx * dy, axis=-1,", "all registered backends. If `floating_point_bits` is an integer, all floating", "locally using `with backend:`. Returns: current default `Backend` \"\"\" return", "= backend def set_global_precision(floating_point_bits: int): \"\"\" Sets the floating point", "support keyword arguments. \"\"\" return NotImplemented def jit_compile_grad(self, f, wrt:", "spatial...) zero_padding: If True, pads the edges of `value` with", "`'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of all currently", "be of the corresponding data type, float16, float32 or float64.", "= [state.converged for state in states] diverged = [state.diverged for", "self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj) elif method ==", "perform backend computations. \"\"\" def __init__(self, backend: 'Backend', name: str,", "native tensor of this backend, it is returned without modification.", "convert it unless the backend cannot handle Python numbers. 
*Note:*", "NotImplementedError(self) def while_loop(self, loop: Callable, values: tuple): \"\"\" ```python while", "shape (batch, any..., channel) \"\"\" raise NotImplementedError(self) def flatten(self, x):", "\"\"\" Repeats the tensor along each axis the number of", "Determins the appropriate values type resulting from operations involving the", "global default, [1:] from 'with' blocks _PRECISION = [32] #", "= [] for b in range(batch_size): def b_thread(b=b): recent_b_losses =", "b) return a + b def sub(self, a, b): a,", "> 30: descr = descr[:28] + \"...\" return f\"'{self.name}' ({self.device_type})", "the actual computations. Args: *values: prefer_default: if True, selects the", "otherwise. The output of math operations has the same precision", "selected `Backend` \"\"\" # --- Default Backend has priority ---", "backend can operate natively on `tensor`, returns `tensor`. If both", "loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged):", "array. *Warning*: This operation breaks the automatic differentiation chain. Args:", "the device such as driver version. \"\"\" self.ref = ref", "contextlib import contextmanager from threading import Barrier from typing import", "Args: values: Tensor with only spatial dimensions Returns: non-zero multi-indices", "raise NotImplementedError(self) def log10(self, x): raise NotImplementedError(self) def dtype(self, array)", "`numpy(tensor)` must return a valid NumPy representation of the value.", "= True) Returns: tensor representation of `x` \"\"\" raise NotImplementedError()", "combine_types SolveResult = namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations', 'function_evaluations',", "str: return self._name def supports(self, feature: str or Callable) ->", "operations to a compute library or emulate them. 
The methods", "for t, last_point in zip(trajectories, last_points)] trajectory = [] for", "continue_1 = self.to_int32(continue_) it_counter += 1 iterations += continue_1 dx_dy", "got shape {lin_shape}\" return self.matmul(lin, vector) def gradients(self, y, xs:", "Returns: sampled values with linear interpolation \"\"\" return NotImplemented def", "values ``` This operation does not support backpropagation. Args: loop:", "states]) residual = self.stack([state.residual for state in states]) iterations =", "= residual = y - self.linear(lin, x) it_counter = 0", "raise NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self) def ones(self, shape,", "self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter", "them. The methods of `Backend` form a comprehensive list of", "shape, dtype: DType = None): raise NotImplementedError(self) def ones_like(self, tensor):", "function if no backend can handle the given values. If", "* sparse/dense matrix valid for all instances * tuple/list of", "whether `x` is considered a tensor by this backend \"\"\"", "self.processor_count: int = processor_count \"\"\" Number of CPU cores or", "representation. \"\"\" self.backend: 'Backend' = backend \"\"\" Backend that this", "values: Tensor with only spatial dimensions Returns: non-zero multi-indices as", "def zeros_like(self, tensor): raise NotImplementedError(self) def ones(self, shape, dtype: DType", "[False] * batch_size all_finished = False trajectories = [[] for", "trj) elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol,", "of the compute device. CPUs are typically called `'CPU'`. 
\"\"\"", "instances in parallel y: target result of A * x.", "** 2 * self.sum(y ** 2, -1), atol ** 2)", "self.auto_cast(a, b) return a - b def mul(self, a, b):", "current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice utility functions def _is_applicable(backend,", "value, shape): raise NotImplementedError(self) def flip(self, value, axes: tuple or", "the batch dimension which must be either equal for both", "on `trj`. \"\"\" if method == 'auto': return self.conjugate_gradient_adaptive(lin, y,", "Args: floating_point_bits: one of (16, 32, 64, None) \"\"\" _PRECISION[0]", "str) else feature.__name__ if not hasattr(Backend, feature): raise ValueError(f\"Not a", "DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared = rsq0", "b def mul(self, a, b): a, b = self.auto_cast(a, b)", "of shape (batch_size, in_channel, spatial...) kernel: tensor of shape (batch_size", "Convolution result as tensor of shape (batch_size, out_channel, spatial...) \"\"\"", "vs int64. (Default value = True) Returns: tensor representation of", "self.cast(x, DType(int, 32)) def to_int64(self, x): return self.cast(x, DType(int, 64))", "= residual - step_size * dy # in-place subtraction affects", "- b def mul(self, a, b): a, b = self.auto_cast(a,", "`values`. \"\"\" raise NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False): raise", "x, y): x, y = self.auto_cast(x, y) return x >", "If the mode is not supported, returns NotImplemented. Args: value:", "NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self) def scatter(self, base_grid, indices,", "backends: if _is_specific(backend, values): return backend return backends[0] class NoBackendFound(Exception):", "setting, see `set_global_precision()`. Args: floating_point_bits: 16 for half, 32 for", "conversion is required by backend methods. 
An object is considered", "greater_or_equal(self, x, y): x, y = self.auto_cast(x, y) return x", "if True, selects the default backend assuming it can handle", "x, residual, iterations, function_evaluations, converged, diverged, messages) def linear_solve(self, method:", "channel) indices: int tensor of shape (batch, any..., multi_index) where", "of size (batch,) max_iter: Maximum number of iterations of size", "`indices`. The first dimension of `values` and `indices` is the", "precision of DYNAMIC_BACKEND which affects all registered backends. If `floating_point_bits`", "def isfinite(self, x): raise NotImplementedError(self) def scatter(self, base_grid, indices, values,", "is considered a tensor by this backend \"\"\" raise NotImplementedError()", "the value can be represented as a NumPy array. Args:", "residual, iterations, function_evaluations, converged, diverged, \"\")) x = self.copy(x) iterations", "Sets the floating point precision for the local context. Usage:", "padded tensor or NotImplemented \"\"\" raise NotImplementedError(self) def reshape(self, value,", "Callable, values: tuple): \"\"\" ```python while any(values[0]): values = loop(*values)", "a | b def xor(self, a, b): a, b =", "backend.is_tensor(value, only_native=False): return False return True def _is_specific(backend, values): for", "base_grid with values at `indices` updated by `values`. \"\"\" raise", "is not visible.\") @property def complex_type(self) -> DType: return DType(complex,", "return f(*args) def block_until_ready(self, values): pass def jit_compile(self, f: Callable)", "ValueError(\"Illegal axis value\") result = [] for slice_idx in range(tensor.shape[axis]):", "raise NotImplementedError(self) def boolean_mask(self, x, mask, axis=0): \"\"\" Args: x:", "def prefers_channels_last(self) -> bool: raise NotImplementedError() @property def precision(self) ->", "coordinate vectors, i.e. (row, col) for matrices. indices: Tensor holding", "NumPy array. 
*Warning*: This operation breaks the automatic differentiation chain.", "n-dimensional FFT along all but the first and last dimensions.", "def greater_than(self, x, y): x, y = self.auto_cast(x, y) return", "if trajectories is not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b],", "`with backend:`. Returns: current default `Backend` \"\"\" return _DEFAULT[-1] def", "values): \"\"\" Args: values: Tensor with only spatial dimensions Returns:", "`str` or unbound Backend method, e.g. `Backend.sparse_tensor` Returns: Whether the", "| (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active", "for backprop. Will be called as `gradient(*d_out)` to compute the", "diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ;", "known and can be read at this point. If true,", "return x > y def greater_or_equal(self, x, y): x, y", "if len(backends) == 0: raise NoBackendFound(f\"No backend found for types", "Args: grid: Tensor spatial_dims: Dimension indices that correspond to coordinate", "function_evaluations[b], converged[b], diverged[b], \"\") for b in range(batch_size)] trajectories =", "._dtype import DType, combine_types SolveResult = namedtuple('SolveResult', [ 'method', 'x',", "along all but the first and last dimensions. Args: k:", "value. Tensors are typically available when the backend operates in", "loop: Callable, values: tuple): \"\"\" ```python while any(values[0]): values =", "tuple(slice(None, None, -1 if i in axes else None) for", "x is a native tensor of this backend, it is", "with values as specified by `mode` and `constant_values`. 
If the", "backend in BACKENDS: if self.name in backend.name: return backend raise", "conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) ->", "= floating_point_bits def get_precision() -> int: \"\"\" Gets the current", "b = self.auto_cast(a, b) return a ^ b def floordiv(self,", "dim1 is None or dim1 == 1: return dim2 if", "the list. \"\"\" _DEFAULT = [] # [0] = global", "of the backend accept it as a tensor argument. Args:", "raise NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def", "\"\"\" raise NotImplementedError(self) def not_equal(self, x, y): return ~self.equal(x, y)", "Without the Agonizing Pain\" by <NAME> # symbols: dx=d, dy=q,", "type, float16, float32 or float64. Operations may also convert floating", "None finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend = None, use_dlpack=True):", "the first and last dimensions. Args: k: tensor of dimension", "says (x, state) iterations[b] += 1 loss = min(recent_b_losses) recent_b_losses.clear()", "\"\"\" backend = backend or default_backend() current_backend = choose_backend(tensor, prefer_default=False)", "ensures batch-independence x += step_size * dx # if it_counter", "32 for single, 64 for double \"\"\" return _PRECISION[-1] @contextmanager", "raise NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError() def copy(self, tensor,", "if isinstance(device, str): devices = self.list_devices(device) assert len(devices) >= 1,", "*Note:* There may be objects that are considered tensors by", "iterations[b], function_evaluations[b], False, False, \"\")) res = minimize(fun=b_fun, x0=x0[b], jac=True,", "residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged = self.any(residual_squared /", "capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return", "all_finished: f_input_available.wait() 
f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True:", "self._default_device = device def seed(self, seed: int): raise NotImplementedError() def", "modification. This method raises an error if the value of", "\"\"\" Float tensor of selected precision containing random values sampled", "in shape and data type. values: Initial values of loop", "visible.\") @property def complex_type(self) -> DType: return DType(complex, max(64, self.precision))", "= None): raise NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self) def", "atol, max_iter, trj) elif method == 'CG': return self.conjugate_gradient(lin, y,", "def seed(self, seed: int): raise NotImplementedError() def is_tensor(self, x, only_native=False):", "list of all registered backends. Register a `Backend` by adding", "random values sampled from a normal distribution with mean 0", "f_input_available = Barrier(batch_size + 1) f_output_available = Barrier(batch_size + 1)", "zero_padding=True): \"\"\" Convolve value with kernel. Depending on the tensor", "x, mask, axis=0): \"\"\" Args: x: tensor with any number", "def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'): \"\"\" Interpolates a", "Args: loop: Loop function, must return a `tuple` with entries", "self.copy(iterations) finished = converged | diverged | (iterations >= max_iter);", "a ^ b def floordiv(self, a, b): a, b =", "return trajectory else: x = self.stack(xs) residual = self.stack(final_losses) return", "of shape (batch_size, spatial..., channels) indices: Tensor of shape (batch_size", "Target backend. If `None`, uses the current default backend, see", "tensor-like object to the native tensor representation of this backend.", "tensor. If `tensor` is already a NumPy array, it is", "the global setting, see `set_global_precision()`. 
Args: floating_point_bits: 16 for half,", "raise NotImplementedError(self) def maximum(self, a, b): raise NotImplementedError(self) def minimum(self,", "b): a, b = self.auto_cast(a, b) return a // b", "[0] * batch_size function_evaluations = [0] * batch_size xs =", "* dy, axis=-1, keepdims=True) * dx, dx_dy) dy = self.linear(lin,", "a method of this backend that must be implemented if", "range [0, 1) \"\"\" raise NotImplementedError(self) def random_normal(self, shape): \"\"\"", "`f` that uses a custom gradient for backprop. Args: f:", "res.nfev xs[b] = res.x converged[b] = res.success diverged[b] = res.status", "corresponding values \"\"\" raise NotImplementedError(self) def minimize(self, method: str, f,", "return a * b def div(self, numerator, denominator): numerator, denominator", "point, e.g. because it represents a node in a graph.", "values: tuple): \"\"\" ```python while any(values[0]): values = loop(*values) return", "= self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >=", "# Backend choice utility functions def _is_applicable(backend, values): for value", "struct (e.g. tuple, list) and all methods of the backend", "= tensor[tuple([slice_idx if d == axis else slice(None) for d", "`Backend` or `None` \"\"\" return _DEFAULT[-1] if len(_DEFAULT) > 1", "backend if it is not a struct (e.g. tuple, list)", "batch_size all_finished = False trajectories = [[] for _ in", "\"\"\" Conjugate gradient algorithm with adaptive step size. Signature matches", "linear function A(x), must be called on all instances in", "values. \"\"\" def __init__(self, msg): Exception.__init__(self, msg) def default_backend() ->", "`choose_backend()`. 
Args: backend: `Backend` to set as default \"\"\" assert", "batch_size f_inputs = [None] * batch_size f_b_losses = None f_b_losses_np", "& (iterations < max_iter) def loop(continue_, it_counter, x, dx, dy,", "converged = [False] * batch_size diverged = [False] * batch_size", "f\"A must be a matrix but got shape {lin_shape}\" return", "self.description.replace('\\n', ' ') if len(descr) > 30: descr = descr[:28]", "float16, float32 or float64. Operations may also convert floating point", "values: Initial values of loop variables. Returns: Loop variables upon", "shape (batch_size or 1, update_count or 1, channels or 1)", "must have the same nonzero locations. * linear function A(x),", "def batched_gather_nd(self, values, indices): \"\"\" Gathers values from the tensor", "self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1)", "denominator = self.auto_cast(numerator, denominator) return numerator / denominator def pow(self,", "namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message',", "[state.diverged for state in states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations,", "number=1): raise NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self) def staticshape(self,", "backend methods. An object is considered a tensor (nativer or", "not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) return", "bool): from scipy.optimize import OptimizeResult, minimize from threading import Thread", "\"\"\" A physical device that can be selected to perform", "is called by the default implementations of basic operators. Backends", "x, dx, dy, residual, iterations, function_evaluations, converged, diverged _, _,", "if no internal conversion is required by backend methods. 
An", "bool: \"\"\" Tests if the value of the tensor is", "max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b] =", "to cast and to consider when determining the common data", "self.stack(final_losses) return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)", "y - self.linear(lin, x); function_evaluations += 1 # else: residual", "reshape(self, value, shape): raise NotImplementedError(self) def flip(self, value, axes: tuple", "of the correct floating type, returns a copy of `x`.", "\"\"\" return _DEFAULT[-1] def context_backend() -> Backend or None: \"\"\"", "\"\")] if trj else None continue_ = ~converged & ~diverged", "flip(self, value, axes: tuple or list): slices = tuple(slice(None, None,", "of tensor holding the coordinate vectors, i.e. (row, col) for", "tensor) -> bool: \"\"\" Tests if the value of the", "iterations[b] += 1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss", "Gathers values from the tensor `values` at locations `indices`. The", "the value. Tensors are typically available when the backend operates", "None, use_dlpack=True): \"\"\" Convert a Tensor to the native format", "diverged, messages)) return trajectory else: x = self.stack(xs) residual =", "return self.name def __repr__(self): return self.name def list_devices(self, device_type: str", "Returns: tensors cast to a common data type \"\"\" dtypes", "int: \"\"\" Gets the current target floating point precision in", "object to check only_native: If True, only accepts true native", "Returns: bool \"\"\" raise NotImplementedError() def numpy(self, tensor) -> numpy.ndarray:", "int = processor_count \"\"\" Number of CPU cores or GPU", "Number of CPU cores or GPU multiprocessors. -1 for n/a.", "passes x but the documentation says (x, state) iterations[b] +=", "return None if dim1 is None or dim1 == 1:", "1, update_count, index_vector) values: Values to scatter at indices. 
Tensor", "feature) impl_fun = getattr(self.__class__, feature) return impl_fun is not backend_fun", "valid feature: '{feature}'\") backend_fun = getattr(Backend, feature) impl_fun = getattr(self.__class__,", "value, axis=None): raise NotImplementedError(self) def divide_no_nan(self, x, y): \"\"\" Computes", "'batch'): if dim1 is None and dim2 is None: return", "x = x0 dx = residual = y - self.linear(lin,", "def record_gradients(self, xs: tuple or list, persistent=False): raise NotImplementedError(self) def", "\"\"\" self._name = name self._default_device = default_device def __enter__(self): _DEFAULT.append(self)", "\"\"\" # Based on \"An Introduction to the Conjugate Gradient", "an integer, all floating point tensors created henceforth will be", "def divide_no_nan(self, x, y): \"\"\" Computes x/y but returns 0", "indices. Tensor of shape (batch_size, spatial..., channels) indices: Tensor of", "def flatten(self, x): return self.reshape(x, (-1,)) def std(self, x, axis=None,", "the Conjugate Gradient Method Without the Agonizing Pain\" by <NAME>", "b = self.auto_cast(a, b) return a // b BACKENDS =", "DLPack library. Else, intermediately converts `tensor` to a NumPy array.", "Sets the floating point precision of DYNAMIC_BACKEND which affects all", "name \"\"\" Name of the compute device. CPUs are typically", "the tensor is known and can be read at this", "self.linear(lin, dx); function_evaluations += continue_1 diverged = self.any(residual_squared / rsq0", "kernel. Depending on the tensor rank, the convolution is either", "last at values.shape[i]-1. 
extrapolation: Values to use for coordinates outside", "\"\"\" raise NotImplementedError(self) def mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self)", "len(_DEFAULT) > 1 else None def set_global_default_backend(backend: Backend): \"\"\" Sets", "whether to convert it unless the backend cannot handle Python", "representation of `x` \"\"\" raise NotImplementedError() def is_available(self, tensor) ->", "def precision(floating_point_bits: int): \"\"\" Sets the floating point precision for", "False trajectories = [[] for _ in range(batch_size)] if trj", "2D (rank=4) or 3D (rank=5). Higher dimensions may not be", "function_evaluations[b], False, False, \"\")) res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method,", "True) or backend is current_backend: return tensor if use_dlpack and", "residual = y - self.linear(lin, x) it_counter = 0 iterations", "self._default_device def set_default_device(self, device: ComputeDevice or str): if isinstance(device, str):", "def pad(self, value, pad_width, mode: str = 'constant', constant_values=0): \"\"\"", "max_iter) def loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations,", "finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend = None, use_dlpack=True): \"\"\"", "uses zero-copy conversion using the DLPack library. Else, intermediately converts", "floating point tensors created henceforth will be of the corresponding", "axis=0): \"\"\" Args: x: tensor with any number of dimensions", "x, y): return ~self.equal(x, y) def greater_than(self, x, y): x,", "Backend): \"\"\" Sets the given backend as default. This setting", "method: str, lin, y, x0, rtol, atol, max_iter, trj: bool)", "be read at this point. If true, `numpy(tensor)` must return", "to `Backend.linear_solve()`. 
\"\"\" # Based on the variant described in", "value[slices] def sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def prod(self,", "raises a `NoBackendFound` error, else returns `None`. Returns: the selected", "> 1 else None def set_global_default_backend(backend: Backend): \"\"\" Sets the", "backend = backend or default_backend() current_backend = choose_backend(tensor, prefer_default=False) if", "b) return a * b def div(self, numerator, denominator): numerator,", "If x is a Python number (numbers.Number instance), `convert_numbers` decides", "`None`. Returns: `Backend` or `None` \"\"\" return _DEFAULT[-1] if len(_DEFAULT)", "basic operators. Backends can override this method to prevent unnecessary", "floor(self, x): raise NotImplementedError(self) def max(self, x, axis=None, keepdims=False): raise", "= self.auto_cast(a, b) return a // b BACKENDS = []", "precision. If `floating_point_bits` is None, new tensors will default to", "functions def _is_applicable(backend, values): for value in values: if not", "if False and `x` is a Python number that is", "\"\"\" raise NotImplementedError() def numpy(self, tensor) -> numpy.ndarray: \"\"\" Returns", "batch_size converged = [False] * batch_size diverged = [False] *", "y = self.auto_cast(x, y) return x > y def greater_or_equal(self,", "locally using `with precision(p):`. Any Backend method may convert floating", "backend or default_backend() current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True)", "of all registered backends. 
Register a `Backend` by adding it", "# [0] = global default, [1:] from 'with' blocks _PRECISION", "\"\"\" self.description: str = description \"\"\" Further information about the", "\"\"\" return NotImplemented def variable(self, value): return NotImplemented def ndims(self,", "= self.functional_gradient(f, [0], get_output=True) method_description = f\"SciPy {method} with {self.name}\"", "for state in states]) residual = self.stack([state.residual for state in", "diverged =\\ self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations,", "self.sum(residual ** 2, -1, keepdims=True) dx = residual + self.divide_no_nan(residual_squared,", "= 0) mode: str: (Default value = 'constant') Returns: padded", "Reference to the internal device representation. \"\"\" self.backend: 'Backend' =", "None def set_global_default_backend(backend: Backend): \"\"\" Sets the given backend as", "if not hasattr(Backend, feature): raise ValueError(f\"Not a valid feature: '{feature}'\")", "grid indices. The last dimension must match `spatial_dims`. The first", "return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule =", "`mode`, performs scatter_update or scatter_add. Args: base_grid: Tensor into which", "a, b = self.auto_cast(a, b) return a | b def", "return a valid NumPy representation of the value. Tensors are", "the input had a different precision. 
Returns: 16 for half,", "to the Conjugate Gradient Method Without the Agonizing Pain\" by", "(batch, any..., channel) \"\"\" raise NotImplementedError(self) def flatten(self, x): return", "values): for value in values: if not backend.is_tensor(value, only_native=False): return", "boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False):", "numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait()", "\"\"\" raise NotImplementedError(self) def stack(self, values, axis=0): raise NotImplementedError(self) def", "+ 1) f_output_available = Barrier(batch_size + 1) finished = [False]", "SolveResult or List[SolveResult]: \"\"\" Standard conjugate gradient algorithm. Signature matches", "but got shape {lin_shape}\" return self.matmul(lin, vector) def gradients(self, y,", "axis in the form [[axis 0 lower, axis 0 upper],", "tensor (nativer or otherwise) by a backend if it is", "threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if all(finished): all_finished = True", "== 2, f\"A must be a matrix but got shape", "tensors] result_type = self.combine_types(*dtypes) if result_type.kind in (int, float, complex,", "a `tuple` with entries equal to `values` in shape and", "NotImplementedError(self) def maximum(self, a, b): raise NotImplementedError(self) def minimum(self, a,", "* x. 2nd order tensor (batch, vector) or list of", "not backend_fun def prefers_channels_last(self) -> bool: raise NotImplementedError() @property def", "return NotImplemented def functional_gradient(self, f, wrt: tuple or list, get_output:", "system of linear equations A · x = y. 
This", "self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory =", "Possible features: * `sparse_tensor` * `gradients Args: feature: `str` or", "= getattr(Backend, feature) impl_fun = getattr(self.__class__, feature) return impl_fun is", "range(batch_size)] trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t)", "will default to float32 unless specified otherwise. The output of", "f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: # first evaluation", "1) mode: One of ('update', 'add') Returns: Copy of base_grid", "while_loop(self, loop: Callable, values: tuple): \"\"\" ```python while any(values[0]): values", "raise NotImplementedError(self) def flip(self, value, axes: tuple or list): slices", "values, mode: str): \"\"\" Depending on `mode`, performs scatter_update or", "exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self) -> str: return", "by default \"\"\" self._name = name self._default_device = default_device def", "diverged, \"\") def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter,", "DType(int, 64)) def to_complex(self, x): return self.cast(x, DType(complex, max(64, min(self.precision", "-> numpy.ndarray: \"\"\" Returns a NumPy representation of the given", "is None: return None if dim1 is None or dim1", "grid point of dimension i lies at position 0, the", "precision but leave non-float tensors untouched, use `Backend.as_tensor()`. Args: x:", "Returns: NumPy representation of the values stored in the tensor", "method, e.g. `Backend.sparse_tensor` Returns: Whether the feature is supported. 
\"\"\"", "iterations = self.copy(iterations) continue_ = ~converged & ~diverged & (iterations", "str): if isinstance(device, str): devices = self.list_devices(device) assert len(devices) >=", "values \"\"\" raise NotImplementedError(self) def minimize(self, method: str, f, x0,", "= self.staticshape(lin_i) assert len(lin_shape) == 2 return self.stack([self.matmul(m, v) for", "NotImplementedError() def transpose(self, tensor, axes): raise NotImplementedError() def random_uniform(self, shape):", "spatial...) kernel: tensor of shape (batch_size or 1, out_channel, in_channel,", "function, must return a `tuple` with entries equal to `values`", "b_thread.join() # make sure threads exit correctly if trj: max_trajectory_length", "NotImplementedError(self) def dtype(self, array) -> DType: raise NotImplementedError(self) def tile(self,", "for n/a. \"\"\" self.description: str = description \"\"\" Further information", "Tensor spatial_dims: Dimension indices that correspond to coordinate vectors coordinates:", "self.memory > 0 else \"memory: n/a\" pro = f\"{self.processor_count} processors\"", "Dimension indices that correspond to coordinate vectors coordinates: Tensor of", "= self.auto_cast(a, b) return a + b def sub(self, a,", "float32 unless specified otherwise. The output of math operations has", "considered tensors by this backend but are not native and", "tolerance of size (batch,) max_iter: Maximum number of iterations of", "handle the given values. If True, raises a `NoBackendFound` error,", "b) return a & b def or_(self, a, b): a,", "Further information about the device such as driver version. \"\"\"", "for both or one for either. Args: values: tensor of", "persistent=False): raise NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self,", "typically called `'CPU'`. 
\"\"\" self.device_type: str = device_type \"\"\" Type", "= f\"SciPy {method} with {self.name}\" iterations = [0] * batch_size", "in states]) iterations = [state.iterations for state in states] function_evaluations", "0, the last at values.shape[i]-1. extrapolation: Values to use for", "x0, rtol, atol, max_iter, trj) else: raise NotImplementedError(f\"Method '{method}' not", "function based on `f` that uses a custom gradient for", "NotImplemented def functional_gradient(self, f, wrt: tuple or list, get_output: bool):", "diverged)) return trajectory if trj else SolveResult(method, x, residual, iterations,", "out non-applicable --- backends = [backend for backend in BACKENDS", "sign(self, x): raise NotImplementedError(self) def round(self, x): raise NotImplementedError(self) def", "the edges of each axis in the form [[axis 0", "is_tensor(self, x, only_native=False): \"\"\" An object is considered a native", "value = False) Returns: bool: whether `x` is considered a", "shape (batch_size, spatial..., channels) indices: Tensor of shape (batch_size or", "support a compute library, subclass `Backend` and register it by", "_ in range(batch_size)] if trj else None threads = []", "trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)) return trajectory", "x, y = self.auto_cast(x, y) return x >= y def", "as driver version. \"\"\" self.ref = ref \"\"\" (Optional) Reference", "tuple or list): slices = tuple(slice(None, None, -1 if i", "\"\"\" Depending on `mode`, performs scatter_update or scatter_add. Args: base_grid:", "'constant', constant_values=0): \"\"\" Pad a tensor with values as specified", "Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 # (batch, parameters)", "coordinates. Args: grid: Tensor spatial_dims: Dimension indices that correspond to", "all registered backends. 
Register a `Backend` by adding it to", "is considered a tensor (nativer or otherwise) by a backend", "as default. This setting can be overridden using `with backend:`.", "component axes. mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used", "dim2 is None: return None if dim1 is None or", "check if the value can be represented as a NumPy", "tensor of shape (batch, any..., channel) \"\"\" raise NotImplementedError(self) def", "k: tensor of dimension 3 or higher Returns: \"\"\" raise", "for m, v in zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin)", "feature: `str` or unbound Backend method, e.g. `Backend.sparse_tensor` Returns: Whether", "capsule): raise NotImplementedError() def copy(self, tensor, only_mutable=False): raise NotImplementedError() def", "b def sub(self, a, b): a, b = self.auto_cast(a, b)", "of all currently available devices. \"\"\" raise NotImplementedError() def get_default_device(self)", "indices: Tensor of shape (batch_size or 1, update_count, index_vector) values:", "`Backend.sparse_tensor` Returns: Whether the feature is supported. \"\"\" feature =", "(max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories,", "converted by this method. Args: x: tensor-like, e.g. list, tuple,", "tensors untouched, use `Backend.as_tensor()`. Args: x: tensor of bool, int", "Args: value: tensor of shape (batch_size, in_channel, spatial...) kernel: tensor", ">= 0 \"\"\" raise NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self)", "= y - self.linear(lin, x) it_counter = 0 iterations =", "self.auto_cast(a, b) return a ^ b def floordiv(self, a, b):", "dividend % divisor def and_(self, a, b): a, b =", "converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x,", "2 * self.sum(y ** 2, -1), atol ** 2) x", "Signature matches to `Backend.linear_solve()`. 
\"\"\" # Based on the variant", "of iterations of size (batch,) trj: Whether to record and", "Copy of base_grid with values at `indices` updated by `values`.", "at position 0, the last at values.shape[i]-1. extrapolation: Values to", "or list, get_output: bool): raise NotImplementedError(self) def custom_gradient(self, f: Callable,", "= self.auto_cast(a, b) return a - b def mul(self, a,", "DType = None): raise NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self)", "mode: str = 'constant', constant_values=0): \"\"\" Pad a tensor with", "by `mode` and `constant_values`. If the mode is not supported,", "An object is considered a native tensor by a backend", "Element-wise equality check \"\"\" raise NotImplementedError(self) def not_equal(self, x, y):", "in axes else None) for i in range(self.ndims(value))) return value[slices]", "self.auto_cast(a, b) return a & b def or_(self, a, b):", "tuple, list) and all methods of the backend accept it", "a common data type \"\"\" dtypes = [self.dtype(t) for t", "x, residual, iterations, function_evaluations, converged, diverged, messages)) return trajectory else:", "backend: Backend = None, use_dlpack=True): \"\"\" Convert a Tensor to", "this backend can use. Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch:", "2 return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))])", "NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError() def copy(self, tensor, only_mutable=False):", "in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if d == axis", "-1 for n/a. 
\"\"\" self.description: str = description \"\"\" Further", "NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor)) def size(self, array): return", "residual, iterations, function_evaluations, converged, diverged, \"\") def linear(self, lin, vector):", "Args: x: tensor of bool, int or float Returns: Values", "along each axis the number of times given by multiples.", "NotImplemented. Args: value: tensor pad_width: 2D tensor specifying the number", "state in states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged,", "\"\"\" Args: x: tensor with any number of dimensions mask:", "copy of `x`. To convert float tensors to the backend", "array. Args: tensor: backend-compatible tensor Returns: NumPy representation of the", "but are not native and thus, will be converted by", "a with b_axes of b. \"\"\" raise NotImplementedError(self) def matmul(self,", "is not a struct (e.g. tuple, list) and all methods", "descr[:28] + \"...\" return f\"'{self.name}' ({self.device_type}) | {mem} | {pro}", "if _is_applicable(backend, values)] if len(backends) == 0: raise NoBackendFound(f\"No backend", "tensor is known and can be read at this point.", "List[SolveResult]: \"\"\" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`.", "state in states] converged = [state.converged for state in states]", "as `'CPU'`, `'GPU'` or `'TPU'`. \"\"\" self.memory: int = memory", "type \"\"\" dtypes = [self.dtype(t) for t in tensors] result_type", "`('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. 
Returns: sampled values with", "return base ** exp def mod(self, dividend, divisor): dividend, divisor", "def zeros(self, shape, dtype: DType = None): raise NotImplementedError(self) def", "size (batch,) max_iter: Maximum number of iterations of size (batch,)", "'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used for out-of-bounds points if", "supported. Possible features: * `sparse_tensor` * `gradients Args: feature: `str`", "NotImplementedError(self) def boolean_mask(self, x, mask, axis=0): \"\"\" Args: x: tensor", "threads exit correctly if trj: max_trajectory_length = max([len(t) for t", "driver version. \"\"\" self.ref = ref \"\"\" (Optional) Reference to", "Axis index >= 0 \"\"\" raise NotImplementedError(self) def isfinite(self, x):", "and std 1. \"\"\" raise NotImplementedError(self) def stack(self, values, axis=0):", "final_losses = [None] * batch_size converged = [False] * batch_size", "not_equal(self, x, y): return ~self.equal(x, y) def greater_than(self, x, y):", "= self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads:", "mod(self, dividend, divisor): dividend, divisor = self.auto_cast(dividend, divisor) return dividend", "\"\"\" Number of CPU cores or GPU multiprocessors. -1 for", "__exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self) -> str:", "the appropriate values type resulting from operations involving the tensors", "be supported. 
Args: value: tensor of shape (batch_size, in_channel, spatial...)", "states in zip(*trajectories): x = self.stack([self.to_float(state.x) for state in states])", "exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self) -> str: return self._name", "values: if not backend.is_tensor(value, only_native=False): return False return True def", "x): raise NotImplementedError(self) def max(self, x, axis=None, keepdims=False): raise NotImplementedError(self)", "return a ^ b def floordiv(self, a, b): a, b", "atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: \"\"\" Solve", "of sparse/dense matrices for varying matrices along batch, must have", "def __repr__(self): return self.name def list_devices(self, device_type: str or None", "Callable) -> Callable: \"\"\" Creates a function based on `f`", "_DEFAULT = [] # [0] = global default, [1:] from", "(batch_size, in_channel, spatial...) kernel: tensor of shape (batch_size or 1,", "return NotImplemented def variable(self, value): return NotImplemented def ndims(self, tensor):", "= [False] * batch_size all_finished = False trajectories = [[]", "def to_int64(self, x): return self.cast(x, DType(int, 64)) def to_complex(self, x):", "dim2 == 1: return dim1 assert dim1 == dim2, f\"Incompatible", "specifying the number of values padded to the edges of", "_DEFAULT.pop(-1) @property def name(self) -> str: return self._name def supports(self,", "the feature is supported. Possible features: * `sparse_tensor` * `gradients", "matrix valid for all instances * tuple/list of sparse/dense matrices", "dx, dy, residual, iterations, function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_)", "point tensors created henceforth will be of the corresponding data", "out_channel, in_channel, spatial...) 
zero_padding: If True, pads the edges of", "one of (16, 32, 64, None) \"\"\" _PRECISION[0] = floating_point_bits", "y - self.linear(lin, x) it_counter = 0 iterations = self.zeros([batch_size],", "*= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but", "Cannot select '{device} because no device of this type is", "prevent unnecessary casting. Args: *tensors: tensors to cast and to", "tensors to cast and to consider when determining the common", "keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x, mask, axis=0): \"\"\" Args:", "or GPU multiprocessors. -1 for n/a. \"\"\" self.description: str =", "keepdims=False): raise NotImplementedError(self) def prod(self, value, axis=None): raise NotImplementedError(self) def", "= self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy = self.sum(dx *", "True) Returns: tensor representation of `x` \"\"\" raise NotImplementedError() def", "containing random values sampled from a normal distribution with mean", "def b_thread(b=b): recent_b_losses = [] def b_fun(x: numpy.ndarray): function_evaluations[b] +=", "represents a node in a graph. Use `is_available(tensor)` to check", "it_counter % 50 == 0: # Not traceable since Python", "\"\"\" # --- Default Backend has priority --- if _is_applicable(_DEFAULT[-1],", "-1, keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1,", "created henceforth will be of the corresponding data type, float16,", "tensor Returns: bool \"\"\" raise NotImplementedError() def numpy(self, tensor) ->", "shape (batch_size or 1, out_channel, in_channel, spatial...) zero_padding: If True,", "this method to prevent unnecessary casting. Args: *tensors: tensors to", "def linspace(self, start, stop, number): raise NotImplementedError(self) def tensordot(self, a,", "or list of vectors. 
x0: Initial guess of size (batch,", "step_size=alpha, residual_squared=delta, residual=r, y=b method = f\"Φ-Flow CG ({self.name})\" y", "has the same precision as its inputs. Args: floating_point_bits: one", "be converted by this method. Args: x: tensor-like, e.g. list,", "def get_default_device(self) -> ComputeDevice: return self._default_device def set_default_device(self, device: ComputeDevice", "Return only devices of this type, e.g. `'GPU'` or `'CPU'`.", "0 lower, axis 0 upper], ...] including batch and component", "or list, b, b_axes: tuple or list): \"\"\" Multiply-sum-reduce a_axes", "dim2, f\"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}\" return dim1", "custom_gradient(self, f: Callable, gradient: Callable) -> Callable: \"\"\" Creates a", "or float Returns: Values of `x` as float tensor \"\"\"", "= self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1),", "a, b = self.auto_cast(a, b) return a // b BACKENDS", "at the specified coordinates. Args: grid: Tensor spatial_dims: Dimension indices", "\"\"\" The default backend is preferred by `choose_backend()`. The default", "start, stop, number): raise NotImplementedError(self) def tensordot(self, a, a_axes: tuple", "def from_dlpack(self, capsule): raise NotImplementedError() def copy(self, tensor, only_mutable=False): raise", "else None threads = [] for b in range(batch_size): def", "* dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *=", "div(self, numerator, denominator): numerator, denominator = self.auto_cast(numerator, denominator) return numerator", "in states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages))", "even if the input had a different precision. 
Returns: 16", "x0: Initial guess of size (batch, parameters) rtol: Relative tolerance", "current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or backend is", "NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self) def ones(self, shape, dtype:", "index_vector) values: Values to scatter at indices. Tensor of shape", "by the default implementations of basic operators. Backends can override", "precision equal to the currently set default precision. See Also:", "def tile(self, value, multiples): \"\"\" Repeats the tensor along each", "step size. Signature matches to `Backend.linear_solve()`. \"\"\" # Based on", "...] def unstack(self, tensor, axis=0, keepdims=False) -> tuple: if axis", "values, see `default_backend()`. raise_error: Determines the behavior of this function", "if d == axis else slice(None) for d in range(len(tensor.shape))])]", "= 'constant') Returns: padded tensor or NotImplemented \"\"\" raise NotImplementedError(self)", "equal to the currently set default precision. See Also: `Backend.precision()`.", "the DLPack library. 
Else, intermediately converts `tensor` to a NumPy", "`sparse_tensor` * `gradients Args: feature: `str` or unbound Backend method,", "result_type = self.combine_types(*dtypes) if result_type.kind in (int, float, complex, bool):", "list of integers Returns: tile tensor \"\"\" raise NotImplementedError(self) def", "function_evaluations = [0] * batch_size xs = [None] * batch_size", "_is_applicable(backend, values)] if len(backends) == 0: raise NoBackendFound(f\"No backend found", "also supported as tensors (Default value = False) Returns: bool:", "step_size * dy # in-place subtraction affects convergence residual_squared_old =", "None) \"\"\" _PRECISION[0] = floating_point_bits def get_precision() -> int: \"\"\"", "\"\"\" raise NotImplementedError() def as_tensor(self, x, convert_external=True): \"\"\" Converts a", "concat(self, values, axis): raise NotImplementedError(self) def pad(self, value, pad_width, mode:", "higher Returns: \"\"\" raise NotImplementedError(self) def imag(self, x): raise NotImplementedError(self)", "of basic operators. Backends can override this method to prevent", "any number of dimensions mask: 1D mask tensor axis: Axis", "\"...\" return f\"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}\"", "param shape: shape: Returns: \"\"\" raise NotImplementedError(self) def coordinates(self, tensor):", "def div(self, numerator, denominator): numerator, denominator = self.auto_cast(numerator, denominator) return", "self.auto_cast(dividend, divisor) return dividend % divisor def and_(self, a, b):", "that must be implemented if the feature is supported. Possible", "node in a graph. Use `is_available(tensor)` to check if the", "Backend that this device belongs to. Different backends represent the", "tensor holding the coordinate vectors, i.e. 
(row, col) for matrices.", "a, b): a, b = self.auto_cast(a, b) return a ^", "0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged)) return", "record and return the optimization trajectory as a `List[SolveResult]`. Returns:", "raise NotImplementedError(self) def dtype(self, array) -> DType: raise NotImplementedError(self) def", "of this type is available.\" device = devices[0] self._default_device =", "in states] function_evaluations = [state.function_evaluations for state in states] converged", "or others that are also supported as tensors (Default value", "options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b]", "low-level helper functions def combined_dim(dim1, dim2, type_str: str = 'batch'):", "x): raise NotImplementedError(self) def conv(self, value, kernel, zero_padding=True): \"\"\" Convolve", "(Default value = 'constant') Returns: padded tensor or NotImplemented \"\"\"", "result_type) for t in tensors] return tensors def __str__(self): return", "finished = converged | diverged | (iterations >= max_iter); not_finished_1", "a `NoBackendFound` error, else returns `None`. Returns: the selected `Backend`", "support backpropagation. 
Args: loop: Loop function, must return a `tuple`", "+= 1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if", "_is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] #", "def while_loop(self, loop: Callable, values: tuple): \"\"\" ```python while any(values[0]):", "indices): \"\"\" Gathers values from the tensor `values` at locations", "\"processors: n/a\" descr = self.description.replace('\\n', ' ') if len(descr) >", "check only_native: If True, only accepts true native tensor representations,", "= self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual,", "(batch_size or 1, out_channel, in_channel, spatial...) zero_padding: If True, pads", "only_native: If True, only accepts true native tensor representations, not", "b_axes: tuple or list): \"\"\" Multiply-sum-reduce a_axes of a with", "f: Callable, *args, name=None): \"\"\" Calls `f(*args)` and returns the", "residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy)", "bool, int or float Returns: Values of `x` as float", "calls with the profiler. Usage: choose_backend(key).call(custom_function, *args) \"\"\" return f(*args)", "can handle the given values. If True, raises a `NoBackendFound`", "`with backend:` block. 
If called outside a backend context, returns", "not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\"))", "return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged,", "complex_type(self) -> DType: return DType(complex, max(64, self.precision)) def combine_types(self, *dtypes:", "iterations, function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_) it_counter += 1", "if trj: max_trajectory_length = max([len(t) for t in trajectories]) last_points", "NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def fft(self,", "<= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations, function_evaluations,", "registered backends. Register a `Backend` by adding it to the", "multiples): \"\"\" Repeats the tensor along each axis the number", "from the tensor `values` at locations `indices`. The first dimension", "method == 'CG': return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter,", "backend. If `None`, uses the current default backend, see `default_backend()`.", "- len(t) + 1) for t, last_point in zip(trajectories, last_points)]", "self.auto_cast(base, exp) return base ** exp def mod(self, dividend, divisor):", "{descr}\" class Backend: def __init__(self, name: str, default_device: ComputeDevice): \"\"\"", "raise NotImplementedError(self) def cos(self, x): raise NotImplementedError(self) def tan(self, x):", "1) \"\"\" raise NotImplementedError(self) def random_normal(self, shape): \"\"\" Float tensor", "`with precision(p):`. Any Backend method may convert floating point values", "\"\"\" def __init__(self, backend: 'Backend', name: str, device_type: str, memory:", "the value of the tensor is known and can be", "0 upper], ...] including batch and component axes. 
mode: constant',", "final_losses[b] = loss if trajectories is not None: trajectories[b].append(SolveResult(method_description, x,", "= default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb):", "indices that correspond to coordinate vectors coordinates: Tensor of floating", "raise NotImplementedError(self) def divide_no_nan(self, x, y): \"\"\" Computes x/y but", "def coordinates(self, tensor): \"\"\" Returns the coordinates and values of", "x0, rtol, atol, max_iter, trj) elif method == 'CG': return", "trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, \"\")) x =", "input had a different precision. If `floating_point_bits` is None, new", "for value in values: if backend.is_tensor(value, only_native=True): return True return", "dx = residual = y - self.linear(lin, x) it_counter =", "True, selects the default backend assuming it can handle handle", "-1), atol ** 2) x = x0 dx = residual", "error, else returns `None`. Returns: the selected `Backend` \"\"\" #", "getattr(self.__class__, feature) return impl_fun is not backend_fun def prefers_channels_last(self) ->" ]
[ "code\"\"\" if letter_color_code == \"d\": letter_color_code = default elif letter_color_code", "for a bpython-style color code\"\"\" if letter_color_code == \"d\": letter_color_code", "atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"] == \"I\": #", "to spec as I understand it if d[\"fg\"].isupper(): d[\"bold\"] =", "# TODO figure out why boldness isn't based on presence", "for d in stuff[1:]), fs_from_match(stuff[0])) if len(stuff) > 0 else", "atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] = True return fmtstr(d[\"string\"],", "bpython-formatted colored string\"\"\" rest = s stuff = [] while", "FmtStr constructor for a bpython-style color code\"\"\" if letter_color_code ==", "peel_off_string_re.match(s) assert m, repr(s) d = m.groupdict() rest = d[\"rest\"]", "= s stuff = [] while True: if not rest:", "\\x02 color = CNAMES[d[\"fg\"].lower()] if color != \"default\": atts[\"fg\"] =", "color = CNAMES[d[\"bg\"].lower()] if color != \"default\": atts[\"bg\"] = BG_COLORS[color]", "bpython-style color code\"\"\" if letter_color_code == \"d\": letter_color_code = default", "re.VERBOSE | re.DOTALL, ) def peel_off_string(s): m = peel_off_string_re.match(s) assert", "bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str, default: str = \"k\"): if", "fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) 
\\x03", "on presence of \\x02 color = CNAMES[d[\"fg\"].lower()] if color !=", "from curtsies.formatstring import fmtstr, FmtStr from curtsies.termformatconstants import ( FG_COLORS,", "not rest: break start, rest = peel_off_string(rest) stuff.append(start) return (", "= { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) // 2)) %", "d[\"fg\"]: # this isn't according to spec as I understand", "d[\"bold\"]: atts[\"bold\"] = True return fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile(", "default elif letter_color_code == \"D\": letter_color_code = default.upper() return partial(", "elif letter_color_code == \"D\": letter_color_code = default.upper() return partial( fmtstr,", "why boldness isn't based on presence of \\x02 color =", "\"I\": # hack for finding the \"inverse\" color = INVERSE_COLORS[color]", "2)) % len(CURTSIES_COLORS) ] for idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"]", "= {} if d[\"fg\"]: # this isn't according to spec", "FmtStr from curtsies.termformatconstants import ( FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS,", "based on presence of \\x02 color = CNAMES[d[\"fg\"].lower()] if color", "string\"\"\" rest = s stuff = [] while True: if", "it if d[\"fg\"].isupper(): d[\"bold\"] = True # TODO figure out", "isn't based on presence of \\x02 color = CNAMES[d[\"fg\"].lower()] if", "== \"I\": # hack for finding the \"inverse\" color =", "break start, rest = peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d) for", "boldness isn't based on presence of \\x02 color = CNAMES[d[\"fg\"].lower()]", "\"D\": letter_color_code = default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), )", "== \"d\": letter_color_code = default return CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns", "\"\"\"Returns a FmtStr object from a bpython-formatted colored string\"\"\" rest", ") def fs_from_match(d): atts = {} if d[\"fg\"]: # this", 
"CNAMES = dict(zip(\"krgybmcwd\", COLORS)) # hack for finding the \"inverse\"", "of \\x02 color = CNAMES[d[\"fg\"].lower()] if color != \"default\": atts[\"fg\"]", "import re from curtsies.formatstring import fmtstr, FmtStr from curtsies.termformatconstants import", "(?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) \\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL,", "partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str, default: str", "hack for finding the \"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[", "[] while True: if not rest: break start, rest =", "color_for_letter(letter_color_code: str, default: str = \"k\"): if letter_color_code == \"d\":", "(\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS)) # hack for finding the", "default return CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns a FmtStr object from", "import LazyReCompile COLORS = CURTSIES_COLORS + (\"default\",) CNAMES = dict(zip(\"krgybmcwd\",", "True # TODO figure out why boldness isn't based on", "if d[\"bg\"]: if d[\"bg\"] == \"I\": # hack for finding", "according to spec as I understand it if d[\"fg\"].isupper(): d[\"bold\"]", "str = \"k\"): \"\"\"Returns FmtStr constructor for a bpython-style color", "..lazyre import LazyReCompile COLORS = CURTSIES_COLORS + (\"default\",) CNAMES =", "peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) 
\\x03 (?P<string>[^\\x04]*) \\x04", "= FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"] == \"I\": # hack", "func_for_letter(letter_color_code: str, default: str = \"k\"): \"\"\"Returns FmtStr constructor for", "stuff.append(start) return ( sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0])) if", "== \"D\": letter_color_code = default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(),", "len(stuff) > 0 else FmtStr() ) def fs_from_match(d): atts =", "this isn't according to spec as I understand it if", "!= \"default\": atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"] ==", "def peel_off_string(s): m = peel_off_string_re.match(s) assert m, repr(s) d =", "import ( FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS, ) from functools", "CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS) ]", "= INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default: str = \"k\"): \"\"\"Returns", "+ (\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS)) # hack for finding", "{ CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)", "% len(CURTSIES_COLORS) ] for idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] =", "} INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default: str =", "assert m, repr(s) d = m.groupdict() rest = d[\"rest\"] del", "letter_color_code = default elif letter_color_code == \"D\": letter_color_code = default.upper()", "(len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS) ] for idx in range(len(CURTSIES_COLORS))", "letter_color_code = default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def", "functools import partial from ..lazyre import LazyReCompile COLORS = CURTSIES_COLORS", "= default return CNAMES[letter_color_code.lower()] def parse(s): 
\"\"\"Returns a FmtStr object", "finding the \"inverse\" color = INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()]", "(?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL, ) def peel_off_string(s): m =", "\"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) //", "m, repr(s) d = m.groupdict() rest = d[\"rest\"] del d[\"rest\"]", "(?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) \\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE |", "INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()] if color != \"default\": atts[\"bg\"]", "for idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code:", "0 else FmtStr() ) def fs_from_match(d): atts = {} if", "TODO figure out why boldness isn't based on presence of", "curtsies.termformatconstants import ( FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS, ) from", "= dict(zip(\"krgybmcwd\", COLORS)) # hack for finding the \"inverse\" INVERSE_COLORS", "d[\"bold\"] = True # TODO figure out why boldness isn't", "letter_color_code = default return CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns a FmtStr", "color = INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()] if color !=", "INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) // 2))", "= peel_off_string_re.match(s) assert m, repr(s) d = m.groupdict() rest =", "return CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns a FmtStr object from a", "if d[\"fg\"].isupper(): d[\"bold\"] = True # TODO figure out why", "CURTSIES_COLORS + (\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS)) # hack for", "# this isn't according to spec as I understand it", "= CNAMES[d[\"fg\"].lower()] if color != \"default\": atts[\"fg\"] = FG_COLORS[color] if", "\"inverse\" color = INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()] if color", 
"d[\"bg\"]: if d[\"bg\"] == \"I\": # hack for finding the", "\"\"\", re.VERBOSE | re.DOTALL, ) def peel_off_string(s): m = peel_off_string_re.match(s)", "peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))", "if d[\"bg\"] == \"I\": # hack for finding the \"inverse\"", "out why boldness isn't based on presence of \\x02 color", "as I understand it if d[\"fg\"].isupper(): d[\"bold\"] = True #", "= CNAMES[d[\"bg\"].lower()] if color != \"default\": atts[\"bg\"] = BG_COLORS[color] if", "curtsies.formatstring import fmtstr, FmtStr from curtsies.termformatconstants import ( FG_COLORS, BG_COLORS,", "presence of \\x02 color = CNAMES[d[\"fg\"].lower()] if color != \"default\":", "letter_color_code == \"d\": letter_color_code = default return CNAMES[letter_color_code.lower()] def parse(s):", "letter_color_code == \"D\": letter_color_code = default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()],", "rest: break start, rest = peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d)", "fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str, default: str = \"k\"):", "{} if d[\"fg\"]: # this isn't according to spec as", "d[\"fg\"].isupper(): d[\"bold\"] = True # TODO figure out why boldness", "!= \"default\": atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] = True", "= BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] = True return fmtstr(d[\"string\"], **atts)", "( FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS, ) from functools import", "dict(zip(\"krgybmcwd\", COLORS)) # hack for finding the \"inverse\" INVERSE_COLORS =", "= default elif letter_color_code == \"D\": letter_color_code = default.upper() return", "as CURTSIES_COLORS, ) from functools import partial from ..lazyre import", "import partial from ..lazyre import LazyReCompile COLORS = CURTSIES_COLORS +", "\"k\"): if letter_color_code == \"d\": 
letter_color_code = default return CNAMES[letter_color_code.lower()]", "FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"] == \"I\": # hack for", "repr(s) d = m.groupdict() rest = d[\"rest\"] del d[\"rest\"] return", "figure out why boldness isn't based on presence of \\x02", ") def color_for_letter(letter_color_code: str, default: str = \"k\"): if letter_color_code", "CNAMES[d[\"fg\"].lower()] if color != \"default\": atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]:", "\"k\"): \"\"\"Returns FmtStr constructor for a bpython-style color code\"\"\" if", "return fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?)", "stuff[1:]), fs_from_match(stuff[0])) if len(stuff) > 0 else FmtStr() ) def", "def color_for_letter(letter_color_code: str, default: str = \"k\"): if letter_color_code ==", "COLORS = CURTSIES_COLORS + (\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS)) #", ") from functools import partial from ..lazyre import LazyReCompile COLORS", "atts[\"bold\"] = True return fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01", "BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] = True return fmtstr(d[\"string\"], **atts) peel_off_string_re", "d = m.groupdict() rest = d[\"rest\"] del d[\"rest\"] return d,", "atts = {} if d[\"fg\"]: # this isn't according to", "constructor for a bpython-style color code\"\"\" if letter_color_code == \"d\":", "rest = peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d) for d in", "\"\"\"Returns FmtStr constructor for a bpython-style color code\"\"\" if letter_color_code", "fs_from_match(d): atts = {} if d[\"fg\"]: # this isn't according", "default: str = \"k\"): if letter_color_code == \"d\": letter_color_code =", "in stuff[1:]), fs_from_match(stuff[0])) if len(stuff) > 0 else FmtStr() )", "+ (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS) ] for idx in", "str, 
default: str = \"k\"): if letter_color_code == \"d\": letter_color_code", "= \"k\"): \"\"\"Returns FmtStr constructor for a bpython-style color code\"\"\"", "str = \"k\"): if letter_color_code == \"d\": letter_color_code = default", "d in stuff[1:]), fs_from_match(stuff[0])) if len(stuff) > 0 else FmtStr()", "for finding the \"inverse\" color = INVERSE_COLORS[color] else: color =", "= default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code:", "INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default: str = \"k\"): \"\"\"Returns FmtStr", "= [] while True: if not rest: break start, rest", "from ..lazyre import LazyReCompile COLORS = CURTSIES_COLORS + (\"default\",) CNAMES", "re from curtsies.formatstring import fmtstr, FmtStr from curtsies.termformatconstants import (", "isn't according to spec as I understand it if d[\"fg\"].isupper():", "CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS) ] for", "**atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) 
\\x03 (?P<string>[^\\x04]*)", "(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS) ] for idx", "CNAMES[d[\"bg\"].lower()] if color != \"default\": atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]:", "start, rest = peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d) for d", "| re.DOTALL, ) def peel_off_string(s): m = peel_off_string_re.match(s) assert m,", "peel_off_string(s): m = peel_off_string_re.match(s) assert m, repr(s) d = m.groupdict()", "if letter_color_code == \"d\": letter_color_code = default elif letter_color_code ==", "= \"k\"): if letter_color_code == \"d\": letter_color_code = default return", "return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str, default:", "CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns a FmtStr object from a bpython-formatted", "idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str,", "fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str, default: str =", "def fs_from_match(d): atts = {} if d[\"fg\"]: # this isn't", "partial from ..lazyre import LazyReCompile COLORS = CURTSIES_COLORS + (\"default\",)", "# hack for finding the \"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]:", "\"d\": letter_color_code = default return CNAMES[letter_color_code.lower()] def parse(s): \"\"\"Returns a", "] for idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def", "spec as I understand it if d[\"fg\"].isupper(): d[\"bold\"] = True", "\"d\": letter_color_code = default elif letter_color_code == \"D\": letter_color_code =", "I understand it if d[\"fg\"].isupper(): d[\"bold\"] = True # TODO", ") def peel_off_string(s): m = peel_off_string_re.match(s) assert m, repr(s) d", "m = peel_off_string_re.match(s) 
assert m, repr(s) d = m.groupdict() rest", "fs_from_match(stuff[0])) if len(stuff) > 0 else FmtStr() ) def fs_from_match(d):", "\\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL, ) def", "r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) \\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE", "colored string\"\"\" rest = s stuff = [] while True:", "INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default: str = \"k\"):", "= INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()] if color != \"default\":", "= CURTSIES_COLORS + (\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS)) # hack", "else FmtStr() ) def fs_from_match(d): atts = {} if d[\"fg\"]:", "default.upper() return partial( fmtstr, fg=CNAMES[letter_color_code.lower()], bold=letter_color_code.isupper(), ) def color_for_letter(letter_color_code: str,", "default: str = \"k\"): \"\"\"Returns FmtStr constructor for a bpython-style", "( sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0])) if len(stuff) >", "from curtsies.termformatconstants import ( FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS, )", "d[\"bg\"] == \"I\": # hack for finding the \"inverse\" color", "str, default: str = \"k\"): \"\"\"Returns FmtStr constructor for a", "sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0])) if len(stuff) > 0", "> 0 else FmtStr() ) def fs_from_match(d): atts = {}", "if color != \"default\": atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]: if", "object from a bpython-formatted colored string\"\"\" rest = s stuff", "import fmtstr, FmtStr from curtsies.termformatconstants import ( FG_COLORS, BG_COLORS, colors", "if not rest: break start, rest = peel_off_string(rest) stuff.append(start) return", "# hack for finding the \"inverse\" color = INVERSE_COLORS[color] else:", "BG_COLORS, colors as CURTSIES_COLORS, ) from functools import 
partial from", "color = CNAMES[d[\"fg\"].lower()] if color != \"default\": atts[\"fg\"] = FG_COLORS[color]", "a FmtStr object from a bpython-formatted colored string\"\"\" rest =", "a bpython-formatted colored string\"\"\" rest = s stuff = []", "while True: if not rest: break start, rest = peel_off_string(rest)", "= peel_off_string(rest) stuff.append(start) return ( sum((fs_from_match(d) for d in stuff[1:]),", "hack for finding the \"inverse\" color = INVERSE_COLORS[color] else: color", "LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) \\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\",", "if d[\"fg\"]: # this isn't according to spec as I", "len(CURTSIES_COLORS) ] for idx in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]]", "understand it if d[\"fg\"].isupper(): d[\"bold\"] = True # TODO figure", "// 2)) % len(CURTSIES_COLORS) ] for idx in range(len(CURTSIES_COLORS)) }", "finding the \"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx +", "True: if not rest: break start, rest = peel_off_string(rest) stuff.append(start)", "if len(stuff) > 0 else FmtStr() ) def fs_from_match(d): atts", "rest = s stuff = [] while True: if not", "True return fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) 
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)", "in range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default:", "FmtStr object from a bpython-formatted colored string\"\"\" rest = s", "if letter_color_code == \"d\": letter_color_code = default return CNAMES[letter_color_code.lower()] def", "from a bpython-formatted colored string\"\"\" rest = s stuff =", "from functools import partial from ..lazyre import LazyReCompile COLORS =", "s stuff = [] while True: if not rest: break", "= m.groupdict() rest = d[\"rest\"] del d[\"rest\"] return d, rest", "return ( sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0])) if len(stuff)", "FG_COLORS, BG_COLORS, colors as CURTSIES_COLORS, ) from functools import partial", "the \"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx + (len(CURTSIES_COLORS)", "letter_color_code == \"d\": letter_color_code = default elif letter_color_code == \"D\":", "\\x04 (?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL, ) def peel_off_string(s): m", "LazyReCompile COLORS = CURTSIES_COLORS + (\"default\",) CNAMES = dict(zip(\"krgybmcwd\", COLORS))", "(?P<bold>\\x02?) 
\\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL, )", "if color != \"default\": atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"]", "re.DOTALL, ) def peel_off_string(s): m = peel_off_string_re.match(s) assert m, repr(s)", "if d[\"bold\"]: atts[\"bold\"] = True return fmtstr(d[\"string\"], **atts) peel_off_string_re =", "range(len(CURTSIES_COLORS)) } INVERSE_COLORS[\"default\"] = INVERSE_COLORS[CURTSIES_COLORS[0]] def func_for_letter(letter_color_code: str, default: str", "\"default\": atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"] == \"I\":", "= True # TODO figure out why boldness isn't based", "COLORS)) # hack for finding the \"inverse\" INVERSE_COLORS = {", "= LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?) (?P<bg>[krgybmcwdKRGYBMCWDI]?)?) (?P<bold>\\x02?) \\x03 (?P<string>[^\\x04]*) \\x04 (?P<rest>.*)", "color != \"default\": atts[\"fg\"] = FG_COLORS[color] if d[\"bg\"]: if d[\"bg\"]", "stuff = [] while True: if not rest: break start,", "== \"d\": letter_color_code = default elif letter_color_code == \"D\": letter_color_code", "FmtStr() ) def fs_from_match(d): atts = {} if d[\"fg\"]: #", "def parse(s): \"\"\"Returns a FmtStr object from a bpython-formatted colored", "= True return fmtstr(d[\"string\"], **atts) peel_off_string_re = LazyReCompile( r\"\"\"(?P<colormarker>\\x01 (?P<fg>[krgybmcwdKRGYBMCWD]?)", "a bpython-style color code\"\"\" if letter_color_code == \"d\": letter_color_code =", "\"default\": atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] = True return", "the \"inverse\" color = INVERSE_COLORS[color] else: color = CNAMES[d[\"bg\"].lower()] if", "else: color = CNAMES[d[\"bg\"].lower()] if color != \"default\": atts[\"bg\"] =", "color != \"default\": atts[\"bg\"] = BG_COLORS[color] if d[\"bold\"]: atts[\"bold\"] =", "def func_for_letter(letter_color_code: str, default: str = \"k\"): \"\"\"Returns FmtStr constructor", "colors as CURTSIES_COLORS, ) from 
functools import partial from ..lazyre", "CURTSIES_COLORS, ) from functools import partial from ..lazyre import LazyReCompile", "(?P<string>[^\\x04]*) \\x04 (?P<rest>.*) \"\"\", re.VERBOSE | re.DOTALL, ) def peel_off_string(s):", "fmtstr, FmtStr from curtsies.termformatconstants import ( FG_COLORS, BG_COLORS, colors as", "color code\"\"\" if letter_color_code == \"d\": letter_color_code = default elif", "parse(s): \"\"\"Returns a FmtStr object from a bpython-formatted colored string\"\"\"", "for finding the \"inverse\" INVERSE_COLORS = { CURTSIES_COLORS[idx]: CURTSIES_COLORS[ (idx" ]
[ "coding: utf-8 -*- from ..tre_elements import TREExtension, TREElement __classification__ =", "'s', 1, value) self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS', 's', 1,", "self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH', 's',", "value) self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD', 's', 5, value) class", "5, value) self.add_field('BANDGSD', 's', 5, value) class BANDSAType(TREElement): def __init__(self,", "= \"<NAME>\" class BAND(TREElement): def __init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK',", "5, value) class BANDSAType(TREElement): def __init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING',", "def __init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS',", "value) self.add_field('FOCAL_LENGTH', 's', 6, value) self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs',", "value) self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP',", "__init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND', 's',", "self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK', 's',", "self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC', 's',", "\"UNCLASSIFIED\" __author__ = \"<NAME>\" class BAND(TREElement): def __init__(self, value): super(BAND,", "self).__init__() self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING',", "value) self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD',", "__author__ = \"<NAME>\" class BAND(TREElement): def 
__init__(self, value): super(BAND, self).__init__()", "super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND', 's', 5, value)", "value) self.add_field('BANDGSD', 's', 5, value) class BANDSAType(TREElement): def __init__(self, value):", "'s', 7, value) self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH', 's', 6,", "-*- from ..tre_elements import TREExtension, TREElement __classification__ = \"UNCLASSIFIED\" __author__", "self.add_field('BANDGSD', 's', 5, value) class BANDSAType(TREElement): def __init__(self, value): super(BANDSAType,", "value) self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH',", "class BAND(TREElement): def __init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5,", "'s', 5, value) class BANDSAType(TREElement): def __init__(self, value): super(BANDSAType, self).__init__()", "value) class BANDSAType(TREElement): def __init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's',", "'s', 5, value) self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK', 's', 6,", "5, value) self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC', 's', 5, value)", "self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING', 's',", "4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class BANDSA(TREExtension): _tag_value =", "self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP', 's',", "5, value) self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD', 's', 5, value)", "= \"UNCLASSIFIED\" __author__ = \"<NAME>\" class BAND(TREElement): def __init__(self, value):", "__init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS', 
's',", "1, value) self.add_field('FOCAL_LENGTH', 's', 6, value) self.add_field('BANDCOUNT', 'd', 4, value)", "1, value) self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS', 's', 1, value)", "'d', 4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class BANDSA(TREExtension): _tag_value", "TREExtension, TREElement __classification__ = \"UNCLASSIFIED\" __author__ = \"<NAME>\" class BAND(TREElement):", "BAND, value) class BANDSA(TREExtension): _tag_value = 'BANDSA' _data_type = BANDSAType", "self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS', 's',", "5, value) self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND', 's', 5, value)", "value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS', 's', 1,", "'s', 7, value) self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING', 's', 7,", "self.BANDCOUNT, BAND, value) class BANDSA(TREExtension): _tag_value = 'BANDSA' _data_type =", "value) self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class", "'s', 5, value) self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND', 's', 5,", "..tre_elements import TREExtension, TREElement __classification__ = \"UNCLASSIFIED\" __author__ = \"<NAME>\"", "self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH', 's',", "5, value) self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH', 's', 5, value)", "'s', 5, value) self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD', 's', 5,", "\"<NAME>\" class BAND(TREElement): def __init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's',", "7, value) self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING', 's', 7, value)", 
"'s', 1, value) self.add_field('FOCAL_LENGTH', 's', 6, value) self.add_field('BANDCOUNT', 'd', 4,", "BANDSAType(TREElement): def __init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7, value)", "'s', 5, value) self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD', 's', 5,", "self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND', 's',", "7, value) self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH', 's', 6, value)", "import TREExtension, TREElement __classification__ = \"UNCLASSIFIED\" __author__ = \"<NAME>\" class", "value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND', 's', 5,", "5, value) self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK', 's', 6, value)", "super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7, value) self.add_field('ROW_SPACING_UNITS', 's', 1, value)", "value) self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH', 's', 6, value) self.add_field('BANDCOUNT',", "'s', 6, value) self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP', 's', 5,", "self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class BANDSA(TREExtension): _tag_value = 'BANDSA' _data_type", "self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class BANDSA(TREExtension):", "def __init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND',", "self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD', 's',", "value) self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC',", "value) self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH',", 
"'s', 5, value) self.add_field('BANDGSD', 's', 5, value) class BANDSAType(TREElement): def", "5, value) self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD', 's', 5, value)", "value) self.add_field('ROW_SPACING_UNITS', 's', 1, value) self.add_field('COL_SPACING', 's', 7, value) self.add_field('COL_SPACING_UNITS',", "# -*- coding: utf-8 -*- from ..tre_elements import TREExtension, TREElement", "from ..tre_elements import TREExtension, TREElement __classification__ = \"UNCLASSIFIED\" __author__ =", "self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD', 's', 5, value) class BANDSAType(TREElement):", "BAND(TREElement): def __init__(self, value): super(BAND, self).__init__() self.add_field('BANDPEAK', 's', 5, value)", "'s', 6, value) self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND,", "value) self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH', 's', 5, value) self.add_field('BANDCALDRK',", "TREElement __classification__ = \"UNCLASSIFIED\" __author__ = \"<NAME>\" class BAND(TREElement): def", "self.add_field('FOCAL_LENGTH', 's', 6, value) self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs', self.BANDCOUNT,", "utf-8 -*- from ..tre_elements import TREExtension, TREElement __classification__ = \"UNCLASSIFIED\"", "value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value) class BANDSA(TREExtension): _tag_value = 'BANDSA'", "value) self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD', 's', 5, value) self.add_field('BANDGSD',", "<reponame>pressler-vsc/sarpy # -*- coding: utf-8 -*- from ..tre_elements import TREExtension,", "class BANDSAType(TREElement): def __init__(self, value): super(BANDSAType, self).__init__() self.add_field('ROW_SPACING', 's', 7,", "self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP', 's', 5, value) self.add_field('BANDASD', 's',", "self.add_field('COL_SPACING_UNITS', 's', 1, value) self.add_field('FOCAL_LENGTH', 's', 6, value) 
self.add_field('BANDCOUNT', 'd',", "'s', 5, value) self.add_field('BANDCALDRK', 's', 6, value) self.add_field('BANDCALINC', 's', 5,", "'s', 5, value) self.add_field('BANDUBOUND', 's', 5, value) self.add_field('BANDWIDTH', 's', 5,", "__classification__ = \"UNCLASSIFIED\" __author__ = \"<NAME>\" class BAND(TREElement): def __init__(self,", "self).__init__() self.add_field('BANDPEAK', 's', 5, value) self.add_field('BANDLBOUND', 's', 5, value) self.add_field('BANDUBOUND',", "6, value) self.add_field('BANDCOUNT', 'd', 4, value) self.add_loop('BANDs', self.BANDCOUNT, BAND, value)", "6, value) self.add_field('BANDCALINC', 's', 5, value) self.add_field('BANDRESP', 's', 5, value)", "-*- coding: utf-8 -*- from ..tre_elements import TREExtension, TREElement __classification__" ]
[ "to select in form of int or tuple e.g., n=8", "is not yet supported for ' + 'graph neural networks", "model (Model): A compiled instance of keras.engine.training.Model train_data (Iterator): a", "import * from .. import utils as U from ..core", "'graph neural networks in ktrain') class LinkPredLearner(GenLearner): \"\"\" ``` Main", "batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers,", "first element is either filepath or id of validation example", "tup[0] loss = tup[1] truth = tup[2] pred = tup[3]", "utils as U from ..core import GenLearner class NodeClassLearner(GenLearner): \"\"\"", "top losses in validation set. Typically over-ridden by Learner subclasses.", "as U from ..core import GenLearner class NodeClassLearner(GenLearner): \"\"\" ```", "of validation example and second element is loss. ``` \"\"\"", "= tup[2] pred = tup[3] print('----------') print(\"id:%s | loss:%s |", "by default. ``` \"\"\" raise Exception('currently_unsupported: layer_output method is not", "and second element is loss. ``` \"\"\" val = self._check_val(val_data)", "\"\"\" raise Exception('currently_unsupported: layer_output method is not yet supported for", "subclasses. Args: n(int or tuple): a range to select in", "(Preprocessor): A TextPreprocessor or ImagePreprocessor. For some data like text", "or ImagePreprocessor. For some data like text data, a preprocessor", "= self.top_losses(n=n, val_data=val, preproc=preproc) # get multilabel status and class", "true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth, pred)) #print(obs) return", "set, by default. ``` \"\"\" raise Exception('currently_unsupported: layer_output method is", "in validation set. Typically over-ridden by Learner subclasses. 
Args: n(int", "use_val=False): \"\"\" ``` Prints output of layer with index <layer_id>", "None else None # iterate through losses for tup in", "classification Main parameters are: model (Model): A compiled instance of", "pred)) #print(obs) return def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False): \"\"\"", "set val_data (Iterator): A Iterator instance for validation set ```", "NodeClassLearner(GenLearner): \"\"\" ``` Main class used to tune and train", "id of validation example and second element is loss. ```", "val = self._check_val(val_data) # get top losses and associated data", "use instead of self.val_data Returns: list of n tuples where", "from ..core import GenLearner class NodeClassLearner(GenLearner): \"\"\" ``` Main class", "tune and train Keras models for node classification Main parameters", "e.g., n=8 is treated as n=(0,8) preproc (Preprocessor): A TextPreprocessor", "\"\"\" val = self._check_val(val_data) # get top losses and associated", "preproc=preproc) # get multilabel status and class names classes =", "through losses for tup in tups: # get data idx", "(idx, round(loss,2), truth, pred)) #print(obs) return def layer_output(self, layer_id, example_id=0,", "method is not yet supported for ' + 'graph neural", "loss = tup[1] truth = tup[2] pred = tup[3] print('----------')", "node classification Main parameters are: model (Model): A compiled instance", "\"\"\" def __init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False):", "of layer with index <layer_id> to help debug models. Uses", "the pre-processing to correctly view raw data. 
val_data: optional val_data", "n tuples where first element is either filepath or id", "classes = preproc.get_classes() if preproc is not None else None", "(Iterator): a Iterator instance for training set val_data (Iterator): A", "networks in ktrain') class LinkPredLearner(GenLearner): \"\"\" ``` Main class used", "ktrain') class LinkPredLearner(GenLearner): \"\"\" ``` Main class used to tune", "A Iterator instance for validation set ``` \"\"\" def __init__(self,", "either filepath or id of validation example and second element", "yet supported for ' + 'graph neural networks in ktrain')", "layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False): \"\"\" ``` Prints output of", "neural networks in ktrain') class LinkPredLearner(GenLearner): \"\"\" ``` Main class", "preproc is not None else None # iterate through losses", "raise Exception('currently_unsupported: layer_output method is not yet supported for '", "use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\" ``` Views", "as n=(0,8) preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor. For some", "* from .. import utils as U from ..core import", "tup[2] pred = tup[3] print('----------') print(\"id:%s | loss:%s | true:%s", "treated as n=(0,8) preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor. For", "model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data,", "Views observations with top losses in validation set. Typically over-ridden", "| loss:%s | true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth,", "..imports import * from .. 
import utils as U from", "required to undo the pre-processing to correctly view raw data.", "prediction Main parameters are: model (Model): A compiled instance of", "self._check_val(val_data) # get top losses and associated data tups =", "(Iterator): A Iterator instance for validation set ``` \"\"\" def", "data, a preprocessor is required to undo the pre-processing to", "return def view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\" ``` Views observations", "tups: # get data idx = tup[0] loss = tup[1]", "and associated data tups = self.top_losses(n=n, val_data=val, preproc=preproc) # get", "top losses and associated data tups = self.top_losses(n=n, val_data=val, preproc=preproc)", "pred = tup[3] print('----------') print(\"id:%s | loss:%s | true:%s |", "``` Prints output of layer with index <layer_id> to help", ".. import utils as U from ..core import GenLearner class", "Args: n(int or tuple): a range to select in form", "For some data like text data, a preprocessor is required", "self.top_losses(n=n, val_data=val, preproc=preproc) # get multilabel status and class names", "for ' + 'graph neural networks in ktrain') class LinkPredLearner(GenLearner):", "associated data tups = self.top_losses(n=n, val_data=val, preproc=preproc) # get multilabel", "# iterate through losses for tup in tups: # get", "Keras models for link prediction Main parameters are: model (Model):", "list of n tuples where first element is either filepath", "for link prediction Main parameters are: model (Model): A compiled", "not None else None # iterate through losses for tup", "\"\"\" ``` Views observations with top losses in validation set.", "to undo the pre-processing to correctly view raw data. 
val_data:", "val_data=val, preproc=preproc) # get multilabel status and class names classes", "select in form of int or tuple e.g., n=8 is", "and class names classes = preproc.get_classes() if preproc is not", "class used to tune and train Keras models for link", "eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\"", "of int or tuple e.g., n=8 is treated as n=(0,8)", "get multilabel status and class names classes = preproc.get_classes() if", "with top losses in validation set. Typically over-ridden by Learner", "or id of validation example and second element is loss.", "filepath or id of validation example and second element is", "in form of int or tuple e.g., n=8 is treated", "class names classes = preproc.get_classes() if preproc is not None", "batch_id=0, use_val=False): \"\"\" ``` Prints output of layer with index", "keras.engine.training.Model train_data (Iterator): a Iterator instance for training set val_data", "layer_id, example_id=0, batch_id=0, use_val=False): \"\"\" ``` Prints output of layer", "losses and associated data tups = self.top_losses(n=n, val_data=val, preproc=preproc) #", "is required to undo the pre-processing to correctly view raw", "link prediction Main parameters are: model (Model): A compiled instance", "| pred:%s)\\n\" % (idx, round(loss,2), truth, pred)) #print(obs) return def", "= tup[1] truth = tup[2] pred = tup[3] print('----------') print(\"id:%s", "optional val_data to use instead of self.val_data Returns: list of", "use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def", "training set val_data (Iterator): A Iterator instance for validation set", "second element is loss. 
``` \"\"\" val = self._check_val(val_data) #", "names classes = preproc.get_classes() if preproc is not None else", "for tup in tups: # get data idx = tup[0]", "print('----------') print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\" % (idx,", "if preproc is not None else None # iterate through", "index <layer_id> to help debug models. Uses first example (example_id=0)", "idx = tup[0] loss = tup[1] truth = tup[2] pred", "compiled instance of keras.engine.training.Model train_data (Iterator): a Iterator instance for", "training set, by default. ``` \"\"\" raise Exception('currently_unsupported: layer_output method", "used to tune and train Keras models for link prediction", "tune and train Keras models for link prediction Main parameters", "workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\" ```", "example and second element is loss. ``` \"\"\" val =", "iterate through losses for tup in tups: # get data", "A compiled instance of keras.engine.training.Model train_data (Iterator): a Iterator instance", "``` Main class used to tune and train Keras models", "self.val_data Returns: list of n tuples where first element is", "example_id=0, batch_id=0, use_val=False): \"\"\" ``` Prints output of layer with", "Uses first example (example_id=0) from training set, by default. 
```", "else None # iterate through losses for tup in tups:", "for node classification Main parameters are: model (Model): A compiled", "(Model): A compiled instance of keras.engine.training.Model train_data (Iterator): a Iterator", "preproc=None, val_data=None): \"\"\" ``` Views observations with top losses in", "layer_output method is not yet supported for ' + 'graph", "is not None else None # iterate through losses for", "n=8 is treated as n=(0,8) preproc (Preprocessor): A TextPreprocessor or", "get data idx = tup[0] loss = tup[1] truth =", "= tup[3] print('----------') print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\"", "data. val_data: optional val_data to use instead of self.val_data Returns:", "\"\"\" ``` Main class used to tune and train Keras", "like text data, a preprocessor is required to undo the", "to help debug models. Uses first example (example_id=0) from training", "# get data idx = tup[0] loss = tup[1] truth", "int or tuple e.g., n=8 is treated as n=(0,8) preproc", "tuple): a range to select in form of int or", "``` \"\"\" def __init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1,", "view raw data. val_data: optional val_data to use instead of", "``` Views observations with top losses in validation set. Typically", "validation set ``` \"\"\" def __init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS,", "validation set. Typically over-ridden by Learner subclasses. Args: n(int or", "val_data: optional val_data to use instead of self.val_data Returns: list", "class used to tune and train Keras models for node", "of n tuples where first element is either filepath or", "help debug models. Uses first example (example_id=0) from training set,", "default. 
``` \"\"\" raise Exception('currently_unsupported: layer_output method is not yet", "instance of keras.engine.training.Model train_data (Iterator): a Iterator instance for training", "Exception('currently_unsupported: layer_output method is not yet supported for ' +", "supported for ' + 'graph neural networks in ktrain') class", "class LinkPredLearner(GenLearner): \"\"\" ``` Main class used to tune and", "..core import GenLearner class NodeClassLearner(GenLearner): \"\"\" ``` Main class used", "models for link prediction Main parameters are: model (Model): A", "import GenLearner class NodeClassLearner(GenLearner): \"\"\" ``` Main class used to", "Iterator instance for validation set ``` \"\"\" def __init__(self, model,", "eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing)", "a preprocessor is required to undo the pre-processing to correctly", "losses in validation set. Typically over-ridden by Learner subclasses. Args:", "pre-processing to correctly view raw data. val_data: optional val_data to", "= preproc.get_classes() if preproc is not None else None #", "from ..imports import * from .. 
import utils as U", "element is either filepath or id of validation example and", "preproc.get_classes() if preproc is not None else None # iterate", "def view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\" ``` Views observations with", "% (idx, round(loss,2), truth, pred)) #print(obs) return def layer_output(self, layer_id,", "instance for validation set ``` \"\"\" def __init__(self, model, train_data=None,", "def __init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model,", "of self.val_data Returns: list of n tuples where first element", "tup[3] print('----------') print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\" %", "in ktrain') class LinkPredLearner(GenLearner): \"\"\" ``` Main class used to", "None # iterate through losses for tup in tups: #", "tups = self.top_losses(n=n, val_data=val, preproc=preproc) # get multilabel status and", "validation example and second element is loss. ``` \"\"\" val", "Keras models for node classification Main parameters are: model (Model):", "Iterator instance for training set val_data (Iterator): A Iterator instance", "and train Keras models for link prediction Main parameters are:", "in tups: # get data idx = tup[0] loss =", "form of int or tuple e.g., n=8 is treated as", "n=4, preproc=None, val_data=None): \"\"\" ``` Views observations with top losses", "Main parameters are: model (Model): A compiled instance of keras.engine.training.Model", "Main class used to tune and train Keras models for", "over-ridden by Learner subclasses. 
Args: n(int or tuple): a range", "__init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data,", "or tuple): a range to select in form of int", "super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self,", "status and class names classes = preproc.get_classes() if preproc is", "Typically over-ridden by Learner subclasses. Args: n(int or tuple): a", "layer with index <layer_id> to help debug models. Uses first", "example (example_id=0) from training set, by default. ``` \"\"\" raise", "is loss. ``` \"\"\" val = self._check_val(val_data) # get top", "' + 'graph neural networks in ktrain') class LinkPredLearner(GenLearner): \"\"\"", "loss. ``` \"\"\" val = self._check_val(val_data) # get top losses", "instead of self.val_data Returns: list of n tuples where first", "ImagePreprocessor. For some data like text data, a preprocessor is", "val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4, preproc=None,", "train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size,", "or tuple e.g., n=8 is treated as n=(0,8) preproc (Preprocessor):", "first example (example_id=0) from training set, by default. ``` \"\"\"", "| true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth, pred)) #print(obs)", "view_top_losses(self, n=4, preproc=None, val_data=None): \"\"\" ``` Views observations with top", "(example_id=0) from training set, by default. ``` \"\"\" raise Exception('currently_unsupported:", "A TextPreprocessor or ImagePreprocessor. 
For some data like text data,", "TextPreprocessor or ImagePreprocessor. For some data like text data, a", "round(loss,2), truth, pred)) #print(obs) return def layer_output(self, layer_id, example_id=0, batch_id=0,", "<layer_id> to help debug models. Uses first example (example_id=0) from", "models for node classification Main parameters are: model (Model): A", "tup in tups: # get data idx = tup[0] loss", "not yet supported for ' + 'graph neural networks in", "tuple e.g., n=8 is treated as n=(0,8) preproc (Preprocessor): A", "batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4, preproc=None, val_data=None):", "set. Typically over-ridden by Learner subclasses. Args: n(int or tuple):", "n=(0,8) preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor. For some data", "preprocessor is required to undo the pre-processing to correctly view", "undo the pre-processing to correctly view raw data. val_data: optional", "correctly view raw data. val_data: optional val_data to use instead", "\"\"\" ``` Prints output of layer with index <layer_id> to", "Prints output of layer with index <layer_id> to help debug", "# get top losses and associated data tups = self.top_losses(n=n,", "used to tune and train Keras models for node classification", "#print(obs) return def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False): \"\"\" ```", "of keras.engine.training.Model train_data (Iterator): a Iterator instance for training set", "val_data=None): \"\"\" ``` Views observations with top losses in validation", "by Learner subclasses. 
Args: n(int or tuple): a range to", "is treated as n=(0,8) preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.", "GenLearner class NodeClassLearner(GenLearner): \"\"\" ``` Main class used to tune", "Returns: list of n tuples where first element is either", "tuples where first element is either filepath or id of", "print(\"id:%s | loss:%s | true:%s | pred:%s)\\n\" % (idx, round(loss,2),", "a range to select in form of int or tuple", "from training set, by default. ``` \"\"\" raise Exception('currently_unsupported: layer_output", "for validation set ``` \"\"\" def __init__(self, model, train_data=None, val_data=None,", "pred:%s)\\n\" % (idx, round(loss,2), truth, pred)) #print(obs) return def layer_output(self,", "models. Uses first example (example_id=0) from training set, by default.", "instance for training set val_data (Iterator): A Iterator instance for", "to use instead of self.val_data Returns: list of n tuples", "tup[1] truth = tup[2] pred = tup[3] print('----------') print(\"id:%s |", "output of layer with index <layer_id> to help debug models.", "some data like text data, a preprocessor is required to", "LinkPredLearner(GenLearner): \"\"\" ``` Main class used to tune and train", "class NodeClassLearner(GenLearner): \"\"\" ``` Main class used to tune and", "text data, a preprocessor is required to undo the pre-processing", "workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return", "is either filepath or id of validation example and second", "U from ..core import GenLearner class NodeClassLearner(GenLearner): \"\"\" ``` Main", "data idx = tup[0] loss = tup[1] truth = tup[2]", "truth = tup[2] pred = tup[3] print('----------') print(\"id:%s | loss:%s", "+ 'graph neural networks in ktrain') class LinkPredLearner(GenLearner): \"\"\" ```", "import utils as U from ..core import 
GenLearner class NodeClassLearner(GenLearner):", "raw data. val_data: optional val_data to use instead of self.val_data", "train_data (Iterator): a Iterator instance for training set val_data (Iterator):", "val_data to use instead of self.val_data Returns: list of n", "to tune and train Keras models for link prediction Main", "= self._check_val(val_data) # get top losses and associated data tups", "truth, pred)) #print(obs) return def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):", "multilabel status and class names classes = preproc.get_classes() if preproc", "val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS, workers=1, use_multiprocessing=False): super().__init__(model, train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size,", "get top losses and associated data tups = self.top_losses(n=n, val_data=val,", "# get multilabel status and class names classes = preproc.get_classes()", "train Keras models for link prediction Main parameters are: model", "to tune and train Keras models for node classification Main", "``` \"\"\" val = self._check_val(val_data) # get top losses and", "from .. import utils as U from ..core import GenLearner", "range to select in form of int or tuple e.g.,", "= tup[0] loss = tup[1] truth = tup[2] pred =", "train_data=train_data, val_data=val_data, batch_size=batch_size, eval_batch_size=eval_batch_size, workers=workers, use_multiprocessing=use_multiprocessing) return def view_top_losses(self, n=4,", "set ``` \"\"\" def __init__(self, model, train_data=None, val_data=None, batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,", "n(int or tuple): a range to select in form of", "train Keras models for node classification Main parameters are: model", "are: model (Model): A compiled instance of keras.engine.training.Model train_data (Iterator):", "data like text data, a preprocessor is required to undo", "to correctly view raw data. 
val_data: optional val_data to use", "data tups = self.top_losses(n=n, val_data=val, preproc=preproc) # get multilabel status", "a Iterator instance for training set val_data (Iterator): A Iterator", "observations with top losses in validation set. Typically over-ridden by", "``` \"\"\" raise Exception('currently_unsupported: layer_output method is not yet supported", "Learner subclasses. Args: n(int or tuple): a range to select", "loss:%s | true:%s | pred:%s)\\n\" % (idx, round(loss,2), truth, pred))", "preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor. For some data like", "element is loss. ``` \"\"\" val = self._check_val(val_data) # get", "losses for tup in tups: # get data idx =", "with index <layer_id> to help debug models. Uses first example", "where first element is either filepath or id of validation", "debug models. Uses first example (example_id=0) from training set, by", "and train Keras models for node classification Main parameters are:", "val_data (Iterator): A Iterator instance for validation set ``` \"\"\"", "def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False): \"\"\" ``` Prints output", "for training set val_data (Iterator): A Iterator instance for validation", "return def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False): \"\"\" ``` Prints", "parameters are: model (Model): A compiled instance of keras.engine.training.Model train_data" ]
[ "\"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'arc': { \"mark\":", "'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort'", "1: final_filter_part += ' ' + each + ' '", "{ \"mark\": \"point\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"quantitative\"},", "== 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y' return", "'' }, 'topk': '' } } vega_zero_keywords = vega_zero.split(' ')", "!= '': if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{", "'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] =", "'axis': '', 'type': '' }, 'topk': '' } } vega_zero_keywords", "[] self.parsed_vegaZero['transform']['filter'] = final_filter_part else: # only single filter condition", "dataframe=None): self.VegaLiteSpec = { 'bar': { \"mark\": \"bar\", \"encoding\": {", "Vega-Lite if each == '=': each = '==' each_conditions.append(each) if", "\"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } } } VegaZero =", "sort_field, \"order\": sort_order}] }, { \"filter\": \"datum.rank <= \" +", "if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] =", "if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%':", "'temporal' if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] =", "= VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] == 'desc': sort_order 
= 'descending' else:", "self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1]", "== len(filter_part_token) - 1: # each = '&' or '|'", "<= \" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\": sort_field,", "or '|' if 'like' == each_conditions[1]: # only consider this", "if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] =", "= 'week' else: print('Unknown binning step.') if VegaZero['transform']['filter'] != '':", "} # it seems that the group will be performed", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']:", "# each = '&' or '|' if 'like' == each_conditions[1]:", "self.parsed_vegaZero = { 'mark': '', 'data': '', 'encoding': { 'x':", "vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk')", "if 'filter' in vega_zero_keywords: filter_part_token = [] for each in", "+ 3] if 'filter' in vega_zero_keywords: filter_part_token = [] for", "'|' if 'like' == each_conditions[1]: # only consider this case:", "\"bar\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\":", "sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y': sort_field = VegaZero['encoding']['y']['y']", "= [] for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each", "'\") == -1' else: final_filter_part += 'datum.' 
+ ' '.join(each_conditions)", "pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] !=", "by VegaLite defaultly, in our cases. if VegaZero['transform']['group'] != '':", "x in filter_part_token] if '&' in filter_part_token or '|' in", "{ \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\":", "\"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\": [{", "or '|' in filter_part_token: final_filter_part = '' each_conditions = []", "vega-zero keywords to the VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data']", "!= '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' }", "= [] self.parsed_vegaZero['transform']['filter'] = final_filter_part else: # only single filter", "}, 'color': { 'z': '' } }, 'transform': { 'filter':", "' '.join(filter_part_token).split() filter_part_token = ['&' if x == 'and' else", "}], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }, { \"filter\": \"datum.rank", "'line': { \"mark\": \"line\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\":", "- 1: final_filter_part += ' ' + each + '", "'like' == each_conditions[1]: # only consider this case: '%a%' if", "if VegaZero['transform']['group'] != '': pass if VegaZero['transform']['bin']['axis'] != '': if", "= 'and ' + filter_part_token[ filter_part_token.index('between') - 1] + '", "\"type\": \"quantitative\"} } } } VegaZero = self.parse_vegaZero(vega_zero) # assign", "or each == '|' or i == len(filter_part_token) - 1:", "\"quantitative\"} } } } VegaZero = self.parse_vegaZero(vega_zero) # assign some", "VegaZero = self.parse_vegaZero(vega_zero) # assign some vega-zero keywords to the", "= VegaZero['encoding']['x'] 
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and", "'axis': '', 'type': '' }, 'sort': { 'axis': '', 'type':", "self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2]", "}, 'arc': { \"mark\": \"arc\", \"encoding\": { \"color\": {\"field\": \"x\",", "in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort' in", "\"type\": \"quantitative\"} } }, 'arc': { \"mark\": \"arc\", \"encoding\": {", "in filter_part_token] if '&' in filter_part_token or '|' in filter_part_token:", "isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark']", "# it seems that the group will be performed by", "= 'descending' else: sort_order = 'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]:", "= ['|' if x == 'or' else x for x", "VegaLite defaultly, in our cases. if VegaZero['transform']['group'] != '': pass", "--to--> ’==‘ in Vega-Lite if each == '=': each =", "\"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] },", "{ 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' } # it seems that", "{\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} }", "' + filter_part_token[ filter_part_token.index('between') - 1] + ' <=' filter_part_token[filter_part_token.index('between')]", "] if VegaZero['transform']['sort']['axis'] != '': if VegaZero['transform']['sort']['axis'] == 'x': if", "final_filter_part += 'indexof(' + 'datum.' 
+ each_conditions[0] + ',\"' +", "final_filter_part else: # only single filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.'", "'mark': '', 'data': '', 'encoding': { 'x': '', 'y': {", "'': if VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort']", "= '&' or '|' if 'like' == each_conditions[1]: # only", "'': if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\":", "'nominal' } # it seems that the group will be", "\"nominal\"}, \"theta\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'line': {", "sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\": [{ \"field\":", "in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter +", "sort_field, \"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\":", "!= '&' and each != '|': # ’=‘ in SQL", "{\"field\": \"y\", \"type\": \"quantitative\"} } }, 'arc': { \"mark\": \"arc\",", "+= ' ' + each + ' ' each_conditions =", "}, 'topk': '' } } vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark']", "'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if", "'encoding': { 'x': '', 'y': { 'aggregate': '', 'y': ''", "+ \\ each_conditions[3][2:len(each_conditions[3]) - 2] + '\") == -1' else:", "filter_part_token = ['|' if x == 'or' else x for", "= vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate']", "= VegaZero['encoding']['y']['y'] if 
VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':", "vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') +", "= \"<NAME>\" import json import pandas class VegaZero2VegaLite(object): def __init__(self):", "else: sort_order = 'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter =", "if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] =", "to the VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict()", "+= ' & ' + VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '':", "if i != len(filter_part_token) - 1: final_filter_part += ' '", "== 'y': sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown sorting field: ',", "{\"field\": \"x\", \"type\": \"nominal\"}, \"theta\": {\"field\": \"y\", \"type\": \"quantitative\"} }", "- 2] + '\") != -1' elif 'like' == each_conditions[2]", "def parse_vegaZero(self, vega_zero): self.parsed_vegaZero = { 'mark': '', 'data': '',", "' '.join(each_conditions) if i != len(filter_part_token) - 1: final_filter_part +=", "+ filter_part_token[ filter_part_token.index('between') - 1] + ' <=' filter_part_token[filter_part_token.index('between')] =", "i in range(len(filter_part_token)): each = filter_part_token[i] if each != '&'", "== each_conditions[2] and 'not' == each_conditions[1]: if each_conditions[3][1] == '%'", "\"x\", \"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } },", "', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] == 'desc': sort_order", 
"in ['group', 'bin', 'sort', 'topk']: filter_part_token.append(each) else: break if 'between'", "'group': '', 'bin': { 'axis': '', 'type': '' }, 'sort':", "each_conditions[2][len(each_conditions[2]) - 2] == '%': final_filter_part += 'indexof(' + 'datum.'", "'&' or '|' if 'like' == each_conditions[1]: # only consider", "'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter']", "if each != '&' and each != '|': # ’=‘", "& ' + VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis']", "VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y': sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown", "+ ' '.join(each_conditions) if i != len(filter_part_token) - 1: final_filter_part", "'color': { 'z': '' } }, 'transform': { 'filter': '',", "= VegaZero['encoding']['y']['y'] else: print('Unknown sorting field: ', VegaZero['transform']['sort']['axis']) sort_field =", "{ \"filter\": \"datum.rank <= \" + str(VegaZero['transform']['topk']) } ] if", "'bar': { \"mark\": \"bar\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\":", "if VegaZero['transform']['sort']['type'] == 'desc': sort_order = 'descending' else: sort_order =", "x in filter_part_token] filter_part_token = ['|' if x == 'or'", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] !=", "1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group' in vega_zero_keywords:", "VegaZero['transform']['group'] != '': pass if VegaZero['transform']['bin']['axis'] != '': if VegaZero['transform']['bin']['axis']", "') self.parsed_vegaZero['mark'] = 
vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') +", "in Vega-Lite if each == '=': each = '==' each_conditions.append(each)", "'transform': { 'filter': '', 'group': '', 'bin': { 'axis': '',", "'' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field']", "vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk' in vega_zero_keywords:", "' <=' filter_part_token[filter_part_token.index('between')] = '>=' # replace 'and' -- 'or'", "if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type']", "# ’=‘ in SQL --to--> ’==‘ in Vega-Lite if each", "each_conditions.append(each) if each == '&' or each == '|' or", "' + VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis'] ==", "+ 2] = 'and ' + filter_part_token[ filter_part_token.index('between') - 1]", "self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter + '", "\"filter\": VegaZero['transform']['filter'] }] elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\":", "sort_order = 'descending' else: sort_order = 'ascending' if 'transform' in", "<= \" + str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis'] != '':", "not in ['group', 'bin', 'sort', 'topk']: filter_part_token.append(each) else: break if", "VegaZero['transform']['filter'] != '': if 
'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] =", "vega_zero): self.parsed_vegaZero = { 'mark': '', 'data': '', 'encoding': {", "'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter'] }]", "'week' else: print('Unknown binning step.') if VegaZero['transform']['filter'] != '': if", "+= 'datum.' + ' '.join(each_conditions) if i != len(filter_part_token) -", "performed by VegaLite defaultly, in our cases. if VegaZero['transform']['group'] !=", "-1' elif 'like' == each_conditions[2] and 'not' == each_conditions[1]: if", "' '.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec =", "\"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] })", "else x for x in filter_part_token] filter_part_token = ['|' if", "\"filter\": \"datum.rank <= \" + str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis']", "'', 'y': { 'aggregate': '', 'y': '' }, 'color': {", "'>=' # replace 'and' -- 'or' filter_part_token = ' '.join(filter_part_token).split()", "group will be performed by VegaLite defaultly, in our cases.", "'datum.' 
+ ' '.join(each_conditions) if i != len(filter_part_token) - 1:", "def __init__(self): pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero = { 'mark':", "[{\"field\": sort_field, \"order\": sort_order}] }, { \"filter\": \"datum.rank <= \"", "['|' if x == 'or' else x for x in", "',\"' + \\ each_conditions[2][2:len(each_conditions[2]) - 2] + '\") != -1'", "2] if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1]", "filter_part_token: filter_part_token[filter_part_token.index('between') + 2] = 'and ' + filter_part_token[ filter_part_token.index('between')", "+ '\") != -1' elif 'like' == each_conditions[2] and 'not'", "if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter']", "def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec = { 'bar': { \"mark\":", "= { 'bar': { \"mark\": \"bar\", \"encoding\": { \"x\": {\"field\":", "'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] =", "else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort']", "= vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] =", "consider this case: '%a%' if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2])", "final_filter_part = '' each_conditions = [] for i in range(len(filter_part_token)):", "vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each not in ['group', 'bin', 'sort',", "+ 1] 
self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter' in", "VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] =", "in our cases. if VegaZero['transform']['group'] != '': pass if VegaZero['transform']['bin']['axis']", "1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter' in vega_zero_keywords:", "only consider this case: '%a%' if each_conditions[2][1] == '%' and", "filter_part_token or '|' in filter_part_token: final_filter_part = '' each_conditions =", "each_conditions[3][len(each_conditions[3]) - 2] == '%': final_filter_part += 'indexof(' + 'datum.'", "be performed by VegaLite defaultly, in our cases. if VegaZero['transform']['group']", "['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] ==", "filter_part_token: final_filter_part = '' each_conditions = [] for i in", "\" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\": sort_field, \"op\":", "self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1]", "only single filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.' 
+ ' '.join(filter_part_token).strip()", "\"type\": \"quantitative\"} } }, 'line': { \"mark\": \"line\", \"encoding\": {", "\"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\": sort_field,", "\"type\": \"quantitative\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } } }", "!= '': if VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type'] == 'desc':", "!= '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else:", "'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']", "'indexof(' + 'datum.' + each_conditions[0] + ',\"' + \\ each_conditions[3][2:len(each_conditions[3])", "__author__ = \"<NAME>\" import json import pandas class VegaZero2VegaLite(object): def", "vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data']", "' ' + each + ' ' each_conditions = []", "single filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.' 
+ ' '.join(filter_part_token).strip() return", "= dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field']", "filter_part_token = [] for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if", "'\") != -1' elif 'like' == each_conditions[2] and 'not' ==", "the VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values']", "{ \"mark\": \"bar\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"},", "'', 'y': '' }, 'color': { 'z': '' } },", "and 'not' == each_conditions[1]: if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3])", "self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x']", "vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] =", "condition self.parsed_vegaZero['transform']['filter'] = 'datum.' 
+ ' '.join(filter_part_token).strip() return self.parsed_vegaZero def", "'type': '' }, 'topk': '' } } vega_zero_keywords = vega_zero.split('", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' } # it", "self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter + ' & ' + \"datum.rank", "' & ' + VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '': if", "\"y\", \"type\": \"quantitative\"} } }, 'point': { \"mark\": \"point\", \"encoding\":", "SQL --to--> ’==‘ in Vega-Lite if each == '=': each", "} }, 'transform': { 'filter': '', 'group': '', 'bin': {", "'.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec = {", "+ VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis'] == 'x':", "str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\":", "if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if", "self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec = { 'bar': {", "vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group'", "if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type']", "'datum.' 
+ each_conditions[0] + ',\"' + \\ each_conditions[2][2:len(each_conditions[2]) - 2]", "= vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter' in vega_zero_keywords: filter_part_token =", "2] + '\") == -1' else: final_filter_part += 'datum.' +", "!= '': pass if VegaZero['transform']['bin']['axis'] != '': if VegaZero['transform']['bin']['axis'] ==", "in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter'] }] elif 'filter'", "vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] =", "if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between') + 2] = 'and '", "== 'and' else x for x in filter_part_token] filter_part_token =", "\"order\": sort_order}] }, { \"filter\": \"datum.rank <= \" + str(VegaZero['transform']['topk'])", "'', 'bin': { 'axis': '', 'type': '' }, 'sort': {", "each != '&' and each != '|': # ’=‘ in", "'type': '' }, 'sort': { 'axis': '', 'type': '' },", "+ 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate')", "else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter'] if VegaZero['transform']['topk']", "each_conditions[1]: if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] ==", "{ 'z': '' } }, 'transform': { 'filter': '', 'group':", "+ 1] if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') +", "filter_part_token[filter_part_token.index('between')] = '>=' # replace 'and' -- 'or' filter_part_token =", "!= 'none': 
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] != '':", "vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] =", "'': if VegaZero['transform']['sort']['axis'] == 'x': sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis']", "2] = 'and ' + filter_part_token[ filter_part_token.index('between') - 1] +", "'x' else: if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else:", "= VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if", "'filter': '', 'group': '', 'bin': { 'axis': '', 'type': ''", "== '|' or i == len(filter_part_token) - 1: # each", "+ ' <=' filter_part_token[filter_part_token.index('between')] = '>=' # replace 'and' --", "'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] == 'weekday':", "for x in filter_part_token] if '&' in filter_part_token or '|'", "the group will be performed by VegaLite defaultly, in our", "VegaZero['transform']['filter'] }] elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter']", "3] if 'filter' in vega_zero_keywords: filter_part_token = [] for each", "+ 1] if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') +", "\"line\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\":", 
"= vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] =", "vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin' in vega_zero_keywords:", "== each_conditions[1]: if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2]", "{ \"x\": {\"field\": \"x\", \"type\": \"quantitative\"}, \"y\": {\"field\": \"y\", \"type\":", "else: print('Unknown binning step.') if VegaZero['transform']['filter'] != '': if 'transform'", "}], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] =", "\"sort\": [{\"field\": sort_field, \"order\": sort_order}] }, { \"filter\": \"datum.rank <=", "+ ',\"' + \\ each_conditions[3][2:len(each_conditions[3]) - 2] + '\") ==", "if x == 'or' else x for x in filter_part_token]", "else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\": [{ \"field\": sort_field, \"op\":", "sorting field: ', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] ==", "== -1' else: final_filter_part += 'datum.' + ' '.join(each_conditions) if", "'x': if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort']", "-1' else: final_filter_part += 'datum.' 
+ ' '.join(each_conditions) if i", "'&' or each == '|' or i == len(filter_part_token) -", "+ ',\"' + \\ each_conditions[2][2:len(each_conditions[2]) - 2] + '\") !=", "if each == '&' or each == '|' or i", "+ \"datum.rank <= \" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{", "vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data')", "vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') +", "!= '': if VegaZero['transform']['sort']['axis'] == 'x': sort_field = VegaZero['encoding']['x'] elif", "sort_field, \"op\": \"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\":", "not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter'] }] elif", "else: break if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between') + 2] =", "each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%': final_filter_part", "'sort', 'topk']: filter_part_token.append(each) else: break if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between')", "\"quantitative\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } } } VegaZero", "'filter'] = current_filter + ' & ' + \"datum.rank <=", "- 1] + ' <=' filter_part_token[filter_part_token.index('between')] = '>=' # replace", "if '&' in filter_part_token or '|' in filter_part_token: final_filter_part =", "vega_zero_keywords: filter_part_token = [] for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]:", "this case: '%a%' if each_conditions[2][1] == '%' and 
each_conditions[2][len(each_conditions[2]) -", "in range(len(filter_part_token)): each = filter_part_token[i] if each != '&' and", "VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field':", "[{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\":", "'z': '' } }, 'transform': { 'filter': '', 'group': '',", "in filter_part_token: final_filter_part = '' each_conditions = [] for i", "\\ each_conditions[3][2:len(each_conditions[3]) - 2] + '\") == -1' else: final_filter_part", "filter_part_token[ filter_part_token.index('between') - 1] + ' <=' filter_part_token[filter_part_token.index('between')] = '>='", "'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter']", "if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] =", "'' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if", "'|': # ’=‘ in SQL --to--> ’==‘ in Vega-Lite if", "filter_part_token[filter_part_token.index('between') + 2] = 'and ' + filter_part_token[ filter_part_token.index('between') -", "elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else:", "'%' and each_conditions[3][len(each_conditions[3]) - 2] == '%': final_filter_part += 'indexof('", "i == len(filter_part_token) - 1: # each = '&' or", "'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1] 
self.parsed_vegaZero['transform']['bin']['type'] =", "} } } VegaZero = self.parse_vegaZero(vega_zero) # assign some vega-zero", "'==' each_conditions.append(each) if each == '&' or each == '|'", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown binning step.') if VegaZero['transform']['filter'] !=", "if VegaZero['transform']['filter'] != '': if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform']", "\"y\", \"type\": \"quantitative\"} } } } VegaZero = self.parse_vegaZero(vega_zero) #", "VegaZero['transform']['filter'] if VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis'] == 'x': sort_field", "self.parse_vegaZero(vega_zero) # assign some vega-zero keywords to the VegaLiteSpec object", "final_filter_part += 'datum.' + ' '.join(each_conditions) if i != len(filter_part_token)", "}, 'transform': { 'filter': '', 'group': '', 'bin': { 'axis':", "= 'datum.' + ' '.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self, vega_zero,", "{ \"color\": {\"field\": \"x\", \"type\": \"nominal\"}, \"theta\": {\"field\": \"y\", \"type\":", "VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal'", "'datum.' 
+ ' '.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None):", "each_conditions = [] self.parsed_vegaZero['transform']['filter'] = final_filter_part else: # only single", "\"x\", \"type\": \"nominal\"}, \"theta\": {\"field\": \"y\", \"type\": \"quantitative\"} } },", "each not in ['group', 'bin', 'sort', 'topk']: filter_part_token.append(each) else: break", "= '==' each_conditions.append(each) if each == '&' or each ==", "VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if", "# only single filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.' + '", "range(len(filter_part_token)): each = filter_part_token[i] if each != '&' and each", "vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter' in vega_zero_keywords: filter_part_token = []", "json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] =", "# assign some vega-zero keywords to the VegaLiteSpec object if", "it seems that the group will be performed by VegaLite", "binning step.') if VegaZero['transform']['filter'] != '': if 'transform' not in", "if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y']", "= vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if", "= { 'mark': '', 'data': '', 'encoding': { 'x': '',", "'.join(filter_part_token).split() filter_part_token = ['&' if x == 'and' else x", "VegaZero['encoding']['y']['y'] if 
VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate']", "= 'x' else: if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y'", "+ 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group' in", "str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis'] != '': if VegaZero['transform']['sort']['axis'] ==", "'=': each = '==' each_conditions.append(each) if each == '&' or", "'.join(each_conditions) if i != len(filter_part_token) - 1: final_filter_part += '", "!= len(filter_part_token) - 1: final_filter_part += ' ' + each", "\"arc\", \"encoding\": { \"color\": {\"field\": \"x\", \"type\": \"nominal\"}, \"theta\": {\"field\":", "VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x'", "== '=': each = '==' each_conditions.append(each) if each == '&'", "if VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] =", "+ ' & ' + \"datum.rank <= \" + str(VegaZero['transform']['topk'])", "and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z']", "each = '==' each_conditions.append(each) if each == '&' or each", "'', 'type': '' }, 'topk': '' } } vega_zero_keywords =", "VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown binning step.')", "some vega-zero keywords to the VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame):", "{ 
'x': '', 'y': { 'aggregate': '', 'y': '' },", "each_conditions[1]: # only consider this case: '%a%' if each_conditions[2][1] ==", "'', 'type': '' }, 'sort': { 'axis': '', 'type': ''", "'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] !=", "’=‘ in SQL --to--> ’==‘ in Vega-Lite if each ==", "parse_vegaZero(self, vega_zero): self.parsed_vegaZero = { 'mark': '', 'data': '', 'encoding':", "+ each_conditions[0] + ',\"' + \\ each_conditions[2][2:len(each_conditions[2]) - 2] +", "'' }, 'sort': { 'axis': '', 'type': '' }, 'topk':", "filter_part_token = ['&' if x == 'and' else x for", "pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero = { 'mark': '', 'data':", "{ 'axis': '', 'type': '' }, 'sort': { 'axis': '',", "== '%': final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] +", "if VegaZero['transform']['sort']['axis'] == 'x': sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] ==", "+ 'datum.' 
+ each_conditions[0] + ',\"' + \\ each_conditions[2][2:len(each_conditions[2]) -", "[{\"field\": sort_field, \"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ {", "1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') +", "= filter_part_token[i] if each != '&' and each != '|':", "= vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if", "\"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'arc':", "vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color'", "final_filter_part += ' ' + each + ' ' each_conditions", "\"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }, { \"filter\":", "VegaZero2VegaLite(object): def __init__(self): pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero = {", "vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort' in vega_zero_keywords:", "import json import pandas class VegaZero2VegaLite(object): def __init__(self): pass def", "= json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field']", "\"x\": {\"field\": \"x\", \"type\": \"quantitative\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"}", "'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week',", "self.parsed_vegaZero['transform']['filter'] = 
final_filter_part else: # only single filter condition self.parsed_vegaZero['transform']['filter']", "'%': final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',\"'", "'x': sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y': sort_field =", "sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] == 'desc': sort_order = 'descending'", "{\"field\": \"y\", \"type\": \"quantitative\"} } } } VegaZero = self.parse_vegaZero(vega_zero)", "\"quantitative\"} } }, 'line': { \"mark\": \"line\", \"encoding\": { \"x\":", "+ 2] if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') +", "'topk': '' } } vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark'] =", "= vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y']", "self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3]", "if each not in ['group', 'bin', 'sort', 'topk']: filter_part_token.append(each) else:", "'-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if VegaZero['transform']['sort']['type'] == 'desc':", "in SQL --to--> ’==‘ in Vega-Lite if each == '=':", "else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] !=", "2] + '\") != -1' elif 'like' == each_conditions[2] and", "json import pandas class VegaZero2VegaLite(object): def __init__(self): pass def parse_vegaZero(self,", "VegaZero['transform']['sort']['axis'] if 
VegaZero['transform']['sort']['type'] == 'desc': sort_order = 'descending' else: sort_order", "+ 'datum.' + each_conditions[0] + ',\"' + \\ each_conditions[3][2:len(each_conditions[3]) -", "self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc':", "\"op\": \"dense_rank\", \"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}]", "VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate']", "'' } }, 'transform': { 'filter': '', 'group': '', 'bin':", "{ \"mark\": \"arc\", \"encoding\": { \"color\": {\"field\": \"x\", \"type\": \"nominal\"},", "'and' else x for x in filter_part_token] filter_part_token = ['|'", "VegaZero['transform']['sort']['type'] == 'desc': sort_order = 'descending' else: sort_order = 'ascending'", "if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'], 'type':", "{\"field\": \"y\", \"type\": \"quantitative\"} } }, 'line': { \"mark\": \"line\",", "assign some vega-zero keywords to the VegaLiteSpec object if isinstance(dataframe,", "[] for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each not", "\"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"}", "if each == '=': each = '==' each_conditions.append(each) if each", "} ] if VegaZero['transform']['sort']['axis'] != '': if VegaZero['transform']['sort']['axis'] == 'x':", "VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type'] in ['date',", "defaultly, in our cases. 
if VegaZero['transform']['group'] != '': pass if", "each_conditions[0] + ',\"' + \\ each_conditions[3][2:len(each_conditions[3]) - 2] + '\")", "and each_conditions[2][len(each_conditions[2]) - 2] == '%': final_filter_part += 'indexof(' +", "'bin', 'sort', 'topk']: filter_part_token.append(each) else: break if 'between' in filter_part_token:", "if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] =", "each + ' ' each_conditions = [] self.parsed_vegaZero['transform']['filter'] = final_filter_part", "filter_part_token.index('between') - 1] + ' <=' filter_part_token[filter_part_token.index('between')] = '>=' #", "' ' each_conditions = [] self.parsed_vegaZero['transform']['filter'] = final_filter_part else: #", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != ''", "= vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] =", "= current_filter + ' & ' + \"datum.rank <= \"", "} VegaZero = self.parse_vegaZero(vega_zero) # assign some vega-zero keywords to", "self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk']", "i != len(filter_part_token) - 1: final_filter_part += ' ' +", "each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each not in ['group',", "= [ { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\":", "filter_part_token = ' '.join(filter_part_token).split() filter_part_token = ['&' if x ==", "'type': 'nominal' } # it seems that the group will", "if VegaZero['transform']['sort']['axis'] != '': if 
VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type']", "in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each not in ['group', 'bin',", "'sort': { 'axis': '', 'type': '' }, 'topk': '' }", "!= '': if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if", "{ 'bar': { \"mark\": \"bar\", \"encoding\": { \"x\": {\"field\": \"x\",", "{ \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\" }],", "'', 'group': '', 'bin': { 'axis': '', 'type': '' },", "'and ' + filter_part_token[ filter_part_token.index('between') - 1] + ' <='", "'' } } vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark')", "'', 'encoding': { 'x': '', 'y': { 'aggregate': '', 'y':", "in filter_part_token] filter_part_token = ['|' if x == 'or' else", "# only consider this case: '%a%' if each_conditions[2][1] == '%'", "VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][", "== 'desc': sort_order = 'descending' else: sort_order = 'ascending' if", "VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate']", "will be performed by VegaLite defaultly, in our cases. 
if", "VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter']", "’==‘ in Vega-Lite if each == '=': each = '=='", "VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x']", "' each_conditions = [] self.parsed_vegaZero['transform']['filter'] = final_filter_part else: # only", "VegaZero['encoding']['color']['z'], 'type': 'nominal' } # it seems that the group", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if VegaZero['transform']['sort']['type']", "step.') if VegaZero['transform']['filter'] != '': if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]:", "'descending' else: sort_order = 'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter", "current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter + ' &", "\"mark\": \"bar\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\":", "cases. 
if VegaZero['transform']['group'] != '': pass if VegaZero['transform']['bin']['axis'] != '':", "vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort')", "\"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\" }], \"sort\":", "VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y'", "'' each_conditions = [] for i in range(len(filter_part_token)): each =", "'aggregate'] if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'],", "# replace 'and' -- 'or' filter_part_token = ' '.join(filter_part_token).split() filter_part_token", "} }, 'line': { \"mark\": \"line\", \"encoding\": { \"x\": {\"field\":", "each_conditions[2][2:len(each_conditions[2]) - 2] + '\") != -1' elif 'like' ==", "' + each + ' ' each_conditions = [] self.parsed_vegaZero['transform']['filter']", "} vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1]", "print('Unknown sorting field: ', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type']", "else: # only single filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.' 
+", "each_conditions = [] for i in range(len(filter_part_token)): each = filter_part_token[i]", "= '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if VegaZero['transform']['sort']['type'] ==", "if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1] if", "VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] !=", "'' }, 'color': { 'z': '' } }, 'transform': {", "for i in range(len(filter_part_token)): each = filter_part_token[i] if each !=", "self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter'] if VegaZero['transform']['topk'] !=", "each = filter_part_token[i] if each != '&' and each !=", "\"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']", "filter condition self.parsed_vegaZero['transform']['filter'] = 'datum.' 
+ ' '.join(filter_part_token).strip() return self.parsed_vegaZero", "VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate']", "replace 'and' -- 'or' filter_part_token = ' '.join(filter_part_token).split() filter_part_token =", "each_conditions[2] and 'not' == each_conditions[1]: if each_conditions[3][1] == '%' and", "\"sort\": [{\"field\": sort_field, \"order\": sort_order}] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [", "'%' and each_conditions[2][len(each_conditions[2]) - 2] == '%': final_filter_part += 'indexof('", "vega_zero, dataframe=None): self.VegaLiteSpec = { 'bar': { \"mark\": \"bar\", \"encoding\":", "VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x'", "to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec = { 'bar': { \"mark\": \"bar\",", "and VegaZero['encoding']['y']['aggregate'] != 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] =", "'topk']: filter_part_token.append(each) else: break if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between') +", "'y': '' }, 'color': { 'z': '' } }, 'transform':", "!= -1' elif 'like' == each_conditions[2] and 'not' == each_conditions[1]:", "1] + ' <=' filter_part_token[filter_part_token.index('between')] = '>=' # replace 'and'", "seems that the group will be performed by VegaLite defaultly,", "pass if VegaZero['transform']['bin']['axis'] != '': if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type']", "\"datum.rank <= \" + str(VegaZero['transform']['topk']) 
self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\":", "\"y\", \"type\": \"quantitative\"} } }, 'arc': { \"mark\": \"arc\", \"encoding\":", "+ str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\",", "sort_order}] }, { \"filter\": \"datum.rank <= \" + str(VegaZero['transform']['topk']) }", "\"encoding\": { \"color\": {\"field\": \"x\", \"type\": \"nominal\"}, \"theta\": {\"field\": \"y\",", "'or' else x for x in filter_part_token] if '&' in", "- 1: # each = '&' or '|' if 'like'", "}, 'sort': { 'axis': '', 'type': '' }, 'topk': ''", "sort_order = 'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter']", "each != '|': # ’=‘ in SQL --to--> ’==‘ in", "each == '|' or i == len(filter_part_token) - 1: #", "+ each + ' ' each_conditions = [] self.parsed_vegaZero['transform']['filter'] =", "{ 'axis': '', 'type': '' }, 'topk': '' } }", "'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' } # it seems that the", "'aggregate': '', 'y': '' }, 'color': { 'z': '' }", "self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter + ' & ' +", "\"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }) else:", "= VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y': sort_field = VegaZero['encoding']['y']['y'] else:", "\"as\": \"rank\" }], \"sort\": [{\"field\": sort_field, \"order\": sort_order}] }, {", "{ 'aggregate': '', 'y': '' }, 'color': { 'z': ''", "sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown sorting field: ', VegaZero['transform']['sort']['axis']) sort_field", "\"mark\": \"point\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"quantitative\"}, \"y\":", "\"y\", \"type\": \"quantitative\"} } 
}, 'line': { \"mark\": \"line\", \"encoding\":", "['group', 'bin', 'sort', 'topk']: filter_part_token.append(each) else: break if 'between' in", "if 'like' == each_conditions[1]: # only consider this case: '%a%'", "\"mark\": \"line\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\":", "'': if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type']", "} }, 'arc': { \"mark\": \"arc\", \"encoding\": { \"color\": {\"field\":", "\"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"quantitative\"}, \"y\": {\"field\": \"y\",", "= [{ \"filter\": VegaZero['transform']['filter'] }] elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']:", "- 2] == '%': final_filter_part += 'indexof(' + 'datum.' +", "VegaZero['transform']['sort']['axis'] != '': if VegaZero['transform']['sort']['axis'] == 'x': if VegaZero['transform']['sort']['type'] ==", "elif VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown binning", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != ''", "each == '=': each = '==' each_conditions.append(each) if each ==", "[{ \"filter\": VegaZero['transform']['filter'] }] elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({", "[ { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\"", "= vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] =", "else: print('Unknown sorting field: ', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] 
if", "'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] =", "- 2] + '\") == -1' else: final_filter_part += 'datum.'", "'arc': { \"mark\": \"arc\", \"encoding\": { \"color\": {\"field\": \"x\", \"type\":", "'or' filter_part_token = ' '.join(filter_part_token).split() filter_part_token = ['&' if x", "dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records')) if VegaZero['mark'] != 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] =", "__init__(self): pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero = { 'mark': '',", "'indexof(' + 'datum.' + each_conditions[0] + ',\"' + \\ each_conditions[2][2:len(each_conditions[2])", "case: '%a%' if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2]", "self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter'] }] elif 'filter' not in", "self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter' in vega_zero_keywords: filter_part_token", "'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter", "'between' in filter_part_token: filter_part_token[filter_part_token.index('between') + 2] = 'and ' +", "= VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = {", "'', 'data': '', 'encoding': { 'x': '', 'y': { 'aggregate':", "1] if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1]", "in filter_part_token: filter_part_token[filter_part_token.index('between') + 2] = 'and 
' + filter_part_token[", "elif VegaZero['transform']['sort']['axis'] == 'y': sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown sorting", "self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z']", "self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group']", "each == '&' or each == '|' or i ==", "} }, 'point': { \"mark\": \"point\", \"encoding\": { \"x\": {\"field\":", "VegaZero['transform']['bin']['axis'] != '': if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal'", "}] elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] })", "+ 1:]: if each not in ['group', 'bin', 'sort', 'topk']:", "= final_filter_part else: # only single filter condition self.parsed_vegaZero['transform']['filter'] =", "current_filter + ' & ' + \"datum.rank <= \" +", "'desc': sort_order = 'descending' else: sort_order = 'ascending' if 'transform'", "= vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] =", "self.VegaLiteSpec = { 'bar': { \"mark\": \"bar\", \"encoding\": { \"x\":", "+ ' ' each_conditions = [] self.parsed_vegaZero['transform']['filter'] = final_filter_part else:", "{\"field\": \"x\", \"type\": \"quantitative\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} }", "in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin')", 
"self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate'] if VegaZero['encoding']['color']['z'] != '': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] =", "'like' == each_conditions[2] and 'not' == each_conditions[1]: if each_conditions[3][1] ==", "VegaZero['transform']['sort']['axis'] == 'y': sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown sorting field:", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else: if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] =", "== each_conditions[1]: # only consider this case: '%a%' if each_conditions[2][1]", "1] if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1]", "== '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%': final_filter_part +=", "2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color' in vega_zero_keywords:", "in filter_part_token or '|' in filter_part_token: final_filter_part = '' each_conditions", "in ['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type']", "== 'x': sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y': sort_field", "'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk'", "1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') +", "\"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'arc': {", "= VegaZero['transform']['bin']['type'] elif 
VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else:", "\"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'point': { \"mark\":", "and each_conditions[3][len(each_conditions[3]) - 2] == '%': final_filter_part += 'indexof(' +", "in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += '", "for x in filter_part_token] filter_part_token = ['|' if x ==", "if x == 'and' else x for x in filter_part_token]", "in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin' in", "\"point\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"quantitative\"}, \"y\": {\"field\":", "\"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"}, \"y\": {\"field\": \"y\",", "self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\",", "\"mark\": \"arc\", \"encoding\": { \"color\": {\"field\": \"x\", \"type\": \"nominal\"}, \"theta\":", "self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' &", "filter_part_token[i] if each != '&' and each != '|': #", "{ \"mark\": \"line\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\": \"nominal\"},", "== '&' or each == '|' or i == len(filter_part_token)", "= ['&' if x == 'and' else x for x", "= vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if", "'y': sort_field = VegaZero['encoding']['y']['y'] else: print('Unknown sorting field: ', 
VegaZero['transform']['sort']['axis'])", "!= 'none': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field']", "each_conditions[0] + ',\"' + \\ each_conditions[2][2:len(each_conditions[2]) - 2] + '\")", "'filter' in vega_zero_keywords: filter_part_token = [] for each in vega_zero_keywords[vega_zero_keywords.index('filter')", "== 'or' else x for x in filter_part_token] if '&'", "'%a%' if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] ==", "+ \\ each_conditions[2][2:len(each_conditions[2]) - 2] + '\") != -1' elif", "== 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type'] in ['date', 'year',", "self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis']", "\"datum.rank <= \" + str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis'] !=", "-- 'or' filter_part_token = ' '.join(filter_part_token).split() filter_part_token = ['&' if", "x for x in filter_part_token] if '&' in filter_part_token or", "and each != '|': # ’=‘ in SQL --to--> ’==‘", "self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2]", "+ 1] if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') +", "}) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter'] if", "'&' and each != '|': # ’=‘ in SQL --to-->", "2] == '%': final_filter_part += 'indexof(' + 'datum.' 
+ each_conditions[0]", "\"type\": \"nominal\"}, \"theta\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'line':", "= 'ascending' if 'transform' in self.VegaLiteSpec[VegaZero['mark']]: current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][", "'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown binning step.') if VegaZero['transform']['filter']", "VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] == 'desc': sort_order =", "self.VegaLiteSpec[VegaZero['mark']]: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{ \"filter\": VegaZero['transform']['filter'] }] elif 'filter' not", "\\ each_conditions[2][2:len(each_conditions[2]) - 2] + '\") != -1' elif 'like'", "' & ' + \"datum.rank <= \" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0,", "<=' filter_part_token[filter_part_token.index('between')] = '>=' # replace 'and' -- 'or' filter_part_token", "1] if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1]", "for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]: if each not in", "}, { \"filter\": \"datum.rank <= \" + str(VegaZero['transform']['topk']) } ]", "1: # each = '&' or '|' if 'like' ==", "x for x in filter_part_token] filter_part_token = ['|' if x", "break if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between') + 2] = 'and", "[] for i in range(len(filter_part_token)): each = filter_part_token[i] if each", "in vega_zero_keywords: filter_part_token = [] for each in vega_zero_keywords[vega_zero_keywords.index('filter') +", "{\"field\": \"y\", \"type\": \"quantitative\"} } }, 'point': { \"mark\": \"point\",", "= '>=' # replace 'and' -- 'or' filter_part_token = '", "x == 
'and' else x for x in filter_part_token] filter_part_token", "+= 'indexof(' + 'datum.' + each_conditions[0] + ',\"' + \\", "keywords to the VegaLiteSpec object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] =", "\"color\": {\"field\": \"x\", \"type\": \"nominal\"}, \"theta\": {\"field\": \"y\", \"type\": \"quantitative\"}", "+ each_conditions[0] + ',\"' + \\ each_conditions[3][2:len(each_conditions[3]) - 2] +", "= { 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' } # it seems", "['&' if x == 'and' else x for x in", "\" + str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis'] != '': if", "'datum.' + each_conditions[0] + ',\"' + \\ each_conditions[3][2:len(each_conditions[3]) - 2]", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] !=", "len(filter_part_token) - 1: final_filter_part += ' ' + each +", "}, 'line': { \"mark\": \"line\", \"encoding\": { \"x\": {\"field\": \"x\",", "else: if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort']", "VegaZero['encoding']['y']['y'] else: print('Unknown sorting field: ', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis']", "'y': { 'aggregate': '', 'y': '' }, 'color': { 'z':", "each_conditions[3][2:len(each_conditions[3]) - 2] + '\") == -1' else: final_filter_part +=", "1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') +", "\"quantitative\"} } }, 'point': { \"mark\": \"point\", \"encoding\": { \"x\":", "or i == len(filter_part_token) - 1: # each = '&'", "!= '|': # ’=‘ in SQL --to--> ’==‘ in Vega-Lite", "+ 
1] if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') +", "' + \"datum.rank <= \" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\":", "pandas class VegaZero2VegaLite(object): def __init__(self): pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero", "'and' -- 'or' filter_part_token = ' '.join(filter_part_token).split() filter_part_token = ['&'", "'not' == each_conditions[1]: if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) -", "vega_zero_keywords[vega_zero_keywords.index('sort') + 2] if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group')", "self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, { \"window\": [{ \"field\": sort_field, \"op\": \"dense_rank\", \"as\": \"rank\"", "if 'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1] if", "filter_part_token.append(each) else: break if 'between' in filter_part_token: filter_part_token[filter_part_token.index('between') + 2]", "'point': { \"mark\": \"point\", \"encoding\": { \"x\": {\"field\": \"x\", \"type\":", "filter_part_token] filter_part_token = ['|' if x == 'or' else x", "not in self.VegaLiteSpec[VegaZero['mark']]['transform']: self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] +=", "x == 'or' else x for x in filter_part_token] if", "self.VegaLiteSpec[VegaZero['mark']]['transform'].append({ \"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & '", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] == 
'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week'", "VegaZero['transform']['sort']['axis'] == 'x': sort_field = VegaZero['encoding']['x'] elif VegaZero['transform']['sort']['axis'] == 'y':", "',\"' + \\ each_conditions[3][2:len(each_conditions[3]) - 2] + '\") == -1'", "+ ' '.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec", "return self.parsed_vegaZero def to_VegaLite(self, vega_zero, dataframe=None): self.VegaLiteSpec = { 'bar':", "vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin' in vega_zero_keywords: self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin')", "import pandas class VegaZero2VegaLite(object): def __init__(self): pass def parse_vegaZero(self, vega_zero):", "object if isinstance(dataframe, pandas.core.frame.DataFrame): self.VegaLiteSpec[VegaZero['mark']]['data'] = dict() self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records'))", "if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%':", "1:]: if each not in ['group', 'bin', 'sort', 'topk']: filter_part_token.append(each)", "if VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis'] == 'x': sort_field =", "}) else: self.VegaLiteSpec[VegaZero['mark']]['transform'] = [ { \"window\": [{ \"field\": sort_field,", "}, 'point': { \"mark\": \"point\", \"encoding\": { \"x\": {\"field\": \"x\",", "VegaZero['transform']['topk'] != '': if VegaZero['transform']['sort']['axis'] == 'x': sort_field = VegaZero['encoding']['x']", "print('Unknown binning step.') if VegaZero['transform']['filter'] != '': if 'transform' not", "our cases. 
if VegaZero['transform']['group'] != '': pass if VegaZero['transform']['bin']['axis'] !=", "\"x\", \"type\": \"quantitative\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }", "'': self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = { 'field': VegaZero['encoding']['color']['z'], 'type': 'nominal' } #", "vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color')", "'x': '', 'y': { 'aggregate': '', 'y': '' }, 'color':", "else x for x in filter_part_token] if '&' in filter_part_token", "\"quantitative\"} } }, 'arc': { \"mark\": \"arc\", \"encoding\": { \"color\":", "VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif", "+ '\") == -1' else: final_filter_part += 'datum.' + '", "= self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter'] self.VegaLiteSpec[VegaZero['mark']]['transform'][0][ 'filter'] = current_filter + ' & '", "= vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] =", "that the group will be performed by VegaLite defaultly, in", "+ 1] self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate')", "= 'temporal' if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit']", "else: final_filter_part += 'datum.' 
+ ' '.join(each_conditions) if i !=", "if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type']", "'group' in vega_zero_keywords: self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1] if 'bin'", "VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown", "elif 'like' == each_conditions[2] and 'not' == each_conditions[1]: if each_conditions[3][1]", "each = '&' or '|' if 'like' == each_conditions[1]: #", "} } vega_zero_keywords = vega_zero.split(' ') self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') +", "{ 'filter': '', 'group': '', 'bin': { 'axis': '', 'type':", "'bin': { 'axis': '', 'type': '' }, 'sort': { 'axis':", "& ' + \"datum.rank <= \" + str(VegaZero['transform']['topk']) self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, {", "\"theta\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'line': { \"mark\":", "self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1]", "\"<NAME>\" import json import pandas class VegaZero2VegaLite(object): def __init__(self): pass", "\"filter\": VegaZero['transform']['filter'] }) else: self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' +", "= [] for i in range(len(filter_part_token)): each = filter_part_token[i] if", "\"type\": \"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'point':", "} } VegaZero = self.parse_vegaZero(vega_zero) # assign some vega-zero keywords", "!= '' and VegaZero['encoding']['y']['aggregate'] != 'none': 
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][ 'aggregate']", "self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if 'sort' in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis']", "'|' in filter_part_token: final_filter_part = '' each_conditions = [] for", "self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y']", "in vega_zero_keywords: self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1] self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort')", "'|' or i == len(filter_part_token) - 1: # each =", "+ 2] self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1] if 'color' in", "if VegaZero['transform']['bin']['axis'] != '': if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] =", "'&' in filter_part_token or '|' in filter_part_token: final_filter_part = ''", "!= 'arc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate']", "== '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%': final_filter_part +=", "VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate']", "'': pass if VegaZero['transform']['bin']['axis'] != '': if 
VegaZero['transform']['bin']['axis'] == 'x':", "len(filter_part_token) - 1: # each = '&' or '|' if", "== 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week' else: print('Unknown binning step.') if", "\"type\": \"quantitative\"} } }, 'point': { \"mark\": \"point\", \"encoding\": {", "1] if 'color' in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1]", "'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y' return self.VegaLiteSpec[VegaZero['mark']]", "= VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate'] != '' and", "if VegaZero['transform']['bin']['axis'] == 'x': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal' if VegaZero['transform']['bin']['type'] in", "= self.parse_vegaZero(vega_zero) # assign some vega-zero keywords to the VegaLiteSpec", "= '' each_conditions = [] for i in range(len(filter_part_token)): each", "\"nominal\"}, \"y\": {\"field\": \"y\", \"type\": \"quantitative\"} } }, 'point': {", "vega_zero_keywords[vega_zero_keywords.index('bin') + 1] self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3] if 'filter'", "'data': '', 'encoding': { 'x': '', 'y': { 'aggregate': '',", "== 'x': if VegaZero['transform']['sort']['type'] == 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else:", "filter_part_token] if '&' in filter_part_token or '|' in filter_part_token: final_filter_part", "field: ', VegaZero['transform']['sort']['axis']) sort_field = VegaZero['transform']['sort']['axis'] if VegaZero['transform']['sort']['type'] == 'desc':", "in vega_zero_keywords: self.parsed_vegaZero['encoding']['color']['z'] = 
vega_zero_keywords[vega_zero_keywords.index('color') + 1] if 'topk' in", "class VegaZero2VegaLite(object): def __init__(self): pass def parse_vegaZero(self, vega_zero): self.parsed_vegaZero =", "self.parsed_vegaZero['transform']['filter'] = 'datum.' + ' '.join(filter_part_token).strip() return self.parsed_vegaZero def to_VegaLite(self,", "if 'topk' in vega_zero_keywords: self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1] if", "+ 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x')", "{ 'mark': '', 'data': '', 'encoding': { 'x': '', 'y':", "= vega_zero_keywords[vega_zero_keywords.index('mark') + 1] self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1] self.parsed_vegaZero['encoding']['x']", "each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%': final_filter_part", "+ str(VegaZero['transform']['topk']) } ] if VegaZero['transform']['sort']['axis'] != '': if VegaZero['transform']['sort']['axis']", "= ' '.join(filter_part_token).split() filter_part_token = ['&' if x == 'and'", "== 'desc': self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x' else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x' else:", "'week', 'month']: self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type'] elif VegaZero['transform']['bin']['type'] == 'weekday': self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit']", "VegaZero['encoding']['y']['aggregate'] else: self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x'] self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y'] if VegaZero['encoding']['y']['aggregate']" ]
[ "self.steps = steps self.stepi = 0 def on_start(self) -> None:", "self.done() return self.stepi = 1 self.send_step() self.on_update() def on_stop(self) ->", "all specific dance steps.\"\"\" def update(self, acn: AC) -> None:", "skip processing and continue. \"\"\" name_to_id: Dict[str, int] = {}", "AC, block: ac.Block) -> None: assert isinstance(acn, DanceAC) if self.block", "self.stepi in self.steps.keys(): if self.running(): description = self.steps[self.stepi].disp_str() self.statestr =", "= f'Aktuální krok: {self.stepi}: {description}' self.statestr_send() def on_block_change(self, block: ac.Block)", "-> None: pass def on_start(self, acn: AC) -> None: pass", "name: str, acn: AC) -> int: if not StepJC.name_to_id: jcs", "= { jc['name']: jc['id'] for jc in jcs if jc['type']", "self.statestr_send() def on_update(self) -> None: AC.on_update(self) if not self.running(): return", "on_block_change(self, acn: AC, block: ac.Block) -> None: assert isinstance(acn, DanceAC)", "= None acn.step_done() def disp_str(self) -> str: return f'Čekání {self.delay}'", "-> None: logging.info('Start') for stepi, step in self.steps.items(): try: step.on_start(self)", "None: self.get_jc_id(self.name, acn) def get_jc_id(self, name: str, acn: AC) ->", "None: logging.info(f'Step {self.stepi} done, ' f'going to step {self.stepi+1}...') self.stepi", "self.statestr = f'Aktuální krok: {self.stepi}: {description}' self.statestr_send() def on_block_change(self, block:", "self.steps.items(): try: step.on_start(self) except DanceStartException as e: self.disp_error(f'Krok {stepi}: '+str(e))", "already, skip processing and continue. 
\"\"\" name_to_id: Dict[str, int] =", "{} def __init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None:", "result['success']: self.jc = None acn.step_done() def on_start(self, acn: AC) ->", "None: self.finish = datetime.datetime.now() + self.delay if datetime.datetime.now() > self.finish:", "if self.checker(self.block): self.block = None acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self,", "update(self, acn: AC) -> None: pass def on_start(self, acn: AC)", "\"\"\" Process jc 'name'. If processed already, skip processing and", "\"\"\"Base class for all specific dance steps.\"\"\" def update(self, acn:", "examples below.\"\"\" name_to_id: Dict[str, int] = {} def __init__(self, name:", "blockid = self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block", "return self.stepi = 1 self.send_step() self.on_update() def on_stop(self) -> None:", "__init__(self, id_: str, password: str, steps: Dict[int, Step]) -> None:", "return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait for", "self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block = None acn.step_done() else:", "if self.block is None or block['id'] != self.block['id']: return if", "in self.steps.keys(): if self.running(): description = self.steps[self.stepi].disp_str() self.statestr = f'Aktuální", "DanceStartException as e: self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return self.stepi =", "if not self.running(): return if self.stepi in self.steps: self.steps[self.stepi].update(self) else:", "continue. 
\"\"\" name_to_id: Dict[str, int] = {} def __init__(self, name:", "password) self.steps = steps self.stepi = 0 def on_start(self) ->", "executing user-defined dance.\"\"\" import logging from typing import Any, Dict,", "acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc =", "class StepWaitForBlock(Step): \"\"\"Wait for specific state of any block. See", "def on_start(self, acn: AC) -> None: self.get_jc_id(self.name, acn) def get_jc_id(self,", "self.statestr_send() def on_block_change(self, block: ac.Block) -> None: if (self.running() and", "on_start(self, acn: AC) -> None: pass def disp_str(self) -> str:", "= self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block =", "{self.name} neexistuje!') return StepJC.name_to_id[name] def disp_str(self) -> str: return f'Stavění", "as e: self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return self.stepi = 1", "return StepJC.name_to_id[name] def disp_str(self) -> str: return f'Stavění JC {self.name}'", "not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def", "acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc = None acn.step_done() def on_start(self,", "block['name']: block['id'] for block in blocks } if name not", "-> None: AC.__init__(self, id_, password) self.steps = steps self.stepi =", "= 1 self.send_step() self.on_update() def on_stop(self) -> None: self.statestr =", "self.finish = datetime.datetime.now() + self.delay if datetime.datetime.now() > self.finish: self.finish", "self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done() def get_block_id(self, name: str,", "predefined steps.\"\"\" def __init__(self, id_: str, password: str, steps: Dict[int,", "type_ self.name = name def update(self, acn: AC) -> None:", 
"isinstance(acn, DanceAC) if self.jc is None: jcid = self.get_jc_id(self.name, acn)", "pass class Step: \"\"\"Base class for all specific dance steps.\"\"\"", "If processed already, skip processing and continue. \"\"\" name_to_id: Dict[str,", "block['id'] != self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None", "steps self.stepi = 0 def on_start(self) -> None: logging.info('Start') for", "self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block = None", "self.running(): description = self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok: {self.stepi}: {description}'", "= None acn.step_done() def get_block_id(self, name: str, acn: AC) ->", "= steps self.stepi = 0 def on_start(self) -> None: logging.info('Start')", "logging from typing import Any, Dict, Optional, Callable import datetime", "def __init__(self, id_: str, password: str, steps: Dict[int, Step]) ->", "self.get_jc_id(self.name, acn) def get_jc_id(self, name: str, acn: AC) -> int:", "DanceAC) if self.finish is None: self.finish = datetime.datetime.now() + self.delay", "name self.checker = checker self.block: Optional[ac.Block] = None def update(self,", "pass class StepWaitForBlock(Step): \"\"\"Wait for specific state of any block.", "def disp_str(self) -> str: return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass", "acn.step_done() def disp_str(self) -> str: return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException):", "jc 'name'. If processed already, skip processing and continue. 
\"\"\"", "bloku {self.name}' def track_is_occupied(block: ac.Block) -> bool: return bool(block['blockState']['state'] ==", "any time.\"\"\" def __init__(self, delay: datetime.timedelta) -> None: self.delay =", "is None: blockid = self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if", "def disp_str(self) -> str: return f'Stavění JC {self.name}' class StepDelay(Step):", "StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self) ->", "if name not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return", "DanceAC(AC): \"\"\"This AC executes predefined steps.\"\"\" def __init__(self, id_: str,", "name_to_id: Dict[str, int] = {} def __init__(self, name: str, checker:", "AC) -> None: self.get_block_id(self.name, acn) def on_block_change(self, acn: AC, block:", "\"\"\"This AC executes predefined steps.\"\"\" def __init__(self, id_: str, password:", "self.type = type_ self.name = name def update(self, acn: AC)", "else: ac.blocks.register([self.block['id']]) def on_start(self, acn: AC) -> None: self.get_block_id(self.name, acn)", "None acn.step_done() def disp_str(self) -> str: return f'Čekání {self.delay}' class", "JC {self.name}' class StepDelay(Step): \"\"\"Delay any time.\"\"\" def __init__(self, delay:", "= Dict[str, Any] class DanceStartException(Exception): pass class Step: \"\"\"Base class", "acn: AC, block: ac.Block) -> None: assert isinstance(acn, DanceAC) if", "ac import ac.blocks from ac import ACs, AC JC =", "isinstance(acn, DanceAC) if self.block is None or block['id'] != self.block['id']:", "name: str, acn: AC) -> int: if not StepWaitForBlock.name_to_id: blocks", "block['id'] for block in blocks } if name not in", "acn: AC) -> None: pass def disp_str(self) -> str: return", "isinstance(acn, DanceAC) if self.finish is None: self.finish = 
datetime.datetime.now() +", "block. See examples below.\"\"\" name_to_id: Dict[str, int] = {} def", "in blocks } if name not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok", "} if name not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\")", "block) # type: ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block) -> None:", "jc in jcs if jc['type'] == self.type } if name", "str: return f'Stavění JC {self.name}' class StepDelay(Step): \"\"\"Delay any time.\"\"\"", "time.\"\"\" def __init__(self, delay: datetime.timedelta) -> None: self.delay = delay", "to step {self.stepi+1}...') self.stepi += 1 self.send_step() self.on_update() def send_step(self)", "= self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok: {self.stepi}: {description}' self.statestr_send() def", "AC) -> None: self.get_jc_id(self.name, acn) def get_jc_id(self, name: str, acn:", "-> None: self.delay = delay self.finish: Optional[datetime.datetime] = None def", "StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']: block['id'] for", "def disp_str(self) -> str: return f'Čekání na stav bloku {self.name}'", "acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block = None acn.step_done() else: ac.blocks.register([self.block['id']]) def", "in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return StepJC.name_to_id[name] def", "specific state of any block. See examples below.\"\"\" name_to_id: Dict[str,", "class JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\" Process jc 'name'. 
If", "import datetime import ac import ac.blocks from ac import ACs,", "StepWaitForBlock.name_to_id = { block['name']: block['id'] for block in blocks }", "Dict[int, Step]) -> None: AC.__init__(self, id_, password) self.steps = steps", "None: self.statestr = '' self.statestr_send() def on_update(self) -> None: AC.on_update(self)", "update(self, acn: AC) -> None: assert isinstance(acn, DanceAC) if self.finish", "= '' self.statestr_send() def on_update(self) -> None: AC.on_update(self) if not", "ACs, AC JC = Dict[str, Any] class DanceStartException(Exception): pass class", "def step_done(self) -> None: logging.info(f'Step {self.stepi} done, ' f'going to", "-> None: self.jc: Optional[JC] = None self.type = type_ self.name", "self.finish: Optional[datetime.datetime] = None def update(self, acn: AC) -> None:", "__init__(self, delay: datetime.timedelta) -> None: self.delay = delay self.finish: Optional[datetime.datetime]", "None: pass def on_start(self, acn: AC) -> None: pass def", "disp_str(self) -> str: return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass class", "Process jc 'name'. 
If processed already, skip processing and continue.", "BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self) -> str: return", "StepJC.name_to_id = { jc['name']: jc['id'] for jc in jcs if", "get_jc_id(self, name: str, acn: AC) -> int: if not StepJC.name_to_id:", "{self.name}' def track_is_occupied(block: ac.Block) -> bool: return bool(block['blockState']['state'] == 'occupied')", "self.steps.keys(): if self.running(): description = self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok:", "ac import ACs, AC JC = Dict[str, Any] class DanceStartException(Exception):", "if self.running(): description = self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok: {self.stepi}:", "-> str: return f'Stavění JC {self.name}' class StepDelay(Step): \"\"\"Delay any", "bool]) -> None: self.name = name self.checker = checker self.block:", "type_: str = 'VC') -> None: self.jc: Optional[JC] = None", "BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait for specific state of any", "AC) -> int: if not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id", "@ac.blocks.on_block_change() def _on_block_change(block: ac.Block) -> None: for acn in ACs.values():", "Optional[ac.Block] = None def update(self, acn: AC) -> None: assert", "None def update(self, acn: AC) -> None: assert isinstance(acn, DanceAC)", "self.block is None or block['id'] != self.block['id']: return if self.checker(block):", "self.jc['state']['active']: self.jc = None acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {})", "acn.step_done() def get_block_id(self, name: str, acn: AC) -> int: if", "= acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']: block['id'] for block in", "class StepJC(Step): \"\"\" Process jc 'name'. 
If processed already, skip", "acn: AC) -> None: pass def on_start(self, acn: AC) ->", "Dict[str, int] = {} def __init__(self, name: str, checker: Callable[[ac.Block],", "self.send_step() self.on_update() def on_stop(self) -> None: self.statestr = '' self.statestr_send()", "def on_start(self, acn: AC) -> None: pass def disp_str(self) ->", "None: self.get_block_id(self.name, acn) def on_block_change(self, acn: AC, block: ac.Block) ->", "self.checker(self.block): self.block = None acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self, acn:", "if datetime.datetime.now() > self.finish: self.finish = None acn.step_done() def disp_str(self)", "def track_is_occupied(block: ac.Block) -> bool: return bool(block['blockState']['state'] == 'occupied') class", "-> str: return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step):", "None: if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type:", "update(self, acn: AC) -> None: assert isinstance(acn, DanceAC) if self.jc", "Step: \"\"\"Base class for all specific dance steps.\"\"\" def update(self,", "int] = {} def __init__(self, name: str, type_: str =", "= acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']: jc['id'] for jc in", "return f'Stavění JC {self.name}' class StepDelay(Step): \"\"\"Delay any time.\"\"\" def", "-> None: assert isinstance(acn, DanceAC) if self.jc is None: jcid", "self.block = None acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self, acn: AC)", "-> str: return f'Čekání na stav bloku {self.name}' def track_is_occupied(block:", "str: return f'Čekání na stav bloku {self.name}' def track_is_occupied(block: ac.Block)", "import ac import ac.blocks from ac import ACs, AC JC", "!= self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done()", "= 
acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block = None acn.step_done() else: ac.blocks.register([self.block['id']])", "return bool(block['blockState']['state'] == 'occupied') class DanceAC(AC): \"\"\"This AC executes predefined", "DanceAC) if self.jc is None: jcid = self.get_jc_id(self.name, acn) self.jc", "None: logging.info('Start') for stepi, step in self.steps.items(): try: step.on_start(self) except", "AC) -> None: pass def disp_str(self) -> str: return ''", "on_update(self) -> None: AC.on_update(self) if not self.running(): return if self.stepi", "description = self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok: {self.stepi}: {description}' self.statestr_send()", "acn: AC) -> int: if not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc']", "self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done() def", "self.stepi += 1 self.send_step() self.on_update() def send_step(self) -> None: if", "return '' class JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\" Process jc", "int: if not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = {", "self.name = name def update(self, acn: AC) -> None: assert", "for specific state of any block. 
See examples below.\"\"\" name_to_id:", "{ block['name']: block['id'] for block in blocks } if name", "isinstance(acn, DanceAC) if self.block is None: blockid = self.get_block_id(self.name, acn)", "= {} def __init__(self, name: str, checker: Callable[[ac.Block], bool]) ->", "self.steps[self.stepi].disp_str() self.statestr = f'Aktuální krok: {self.stepi}: {description}' self.statestr_send() def on_block_change(self,", "\"\"\"Library for executing user-defined dance.\"\"\" import logging from typing import", "'+str(e)) self.done() return self.stepi = 1 self.send_step() self.on_update() def on_stop(self)", "acn: AC) -> None: self.get_block_id(self.name, acn) def on_block_change(self, acn: AC,", "name def update(self, acn: AC) -> None: assert isinstance(acn, DanceAC)", "self.finish: self.finish = None acn.step_done() def disp_str(self) -> str: return", "isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type: ignore @ac.blocks.on_block_change() def _on_block_change(block:", "block: ac.Block) -> None: if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self,", "-> None: logging.info(f'Step {self.stepi} done, ' f'going to step {self.stepi+1}...')", "None or block['id'] != self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block", "in self.steps.items(): try: step.on_start(self) except DanceStartException as e: self.disp_error(f'Krok {stepi}:", "self.block = None acn.step_done() def get_block_id(self, name: str, acn: AC)", "self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return self.stepi = 1 self.send_step() self.on_update()", "JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return StepJC.name_to_id[name] def disp_str(self) -> str:", "Callable[[ac.Block], bool]) -> None: self.name = name self.checker = checker", "try: step.on_start(self) except DanceStartException as e: 
self.disp_error(f'Krok {stepi}: '+str(e)) self.done()", "None: pass def disp_str(self) -> str: return '' class JCNotFoundException(DanceStartException):", "str, steps: Dict[int, Step]) -> None: AC.__init__(self, id_, password) self.steps", "1 self.send_step() self.on_update() def on_stop(self) -> None: self.statestr = ''", "is None or block['id'] != self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']])", "neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self) -> str: return f'Čekání na", "{self.name}' class StepDelay(Step): \"\"\"Delay any time.\"\"\" def __init__(self, delay: datetime.timedelta)", "steps.\"\"\" def __init__(self, id_: str, password: str, steps: Dict[int, Step])", "{self.stepi+1}...') self.stepi += 1 self.send_step() self.on_update() def send_step(self) -> None:", "id_, password) self.steps = steps self.stepi = 0 def on_start(self)", "str: return '' class JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\" Process", "ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done() def get_block_id(self, name: str, acn:", "None: jcid = self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']:", "for block in blocks } if name not in StepWaitForBlock.name_to_id.keys():", "AC.__init__(self, id_, password) self.steps = steps self.stepi = 0 def", "pass def on_start(self, acn: AC) -> None: pass def disp_str(self)", "+ self.delay if datetime.datetime.now() > self.finish: self.finish = None acn.step_done()", "class DanceAC(AC): \"\"\"This AC executes predefined steps.\"\"\" def __init__(self, id_:", "processing and continue. 
\"\"\" name_to_id: Dict[str, int] = {} def", "-> None: self.get_jc_id(self.name, acn) def get_jc_id(self, name: str, acn: AC)", "pass def disp_str(self) -> str: return '' class JCNotFoundException(DanceStartException): pass", "self.jc: Optional[JC] = None self.type = type_ self.name = name", "block in blocks } if name not in StepWaitForBlock.name_to_id.keys(): raise", "import Any, Dict, Optional, Callable import datetime import ac import", "{self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self) -> str: return f'Čekání", "jc['name']: jc['id'] for jc in jcs if jc['type'] == self.type", "= None acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']:", "self.delay = delay self.finish: Optional[datetime.datetime] = None def update(self, acn:", "def update(self, acn: AC) -> None: pass def on_start(self, acn:", "'occupied') class DanceAC(AC): \"\"\"This AC executes predefined steps.\"\"\" def __init__(self,", "f'going to step {self.stepi+1}...') self.stepi += 1 self.send_step() self.on_update() def", "acn: AC) -> None: self.get_jc_id(self.name, acn) def get_jc_id(self, name: str,", "pass class StepJC(Step): \"\"\" Process jc 'name'. 
If processed already,", "if not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']:", "= None self.type = type_ self.name = name def update(self,", "send_step(self) -> None: if self.stepi in self.steps.keys(): if self.running(): description", "acn: AC) -> None: assert isinstance(acn, DanceAC) if self.finish is", "steps.\"\"\" def update(self, acn: AC) -> None: pass def on_start(self,", "class DanceStartException(Exception): pass class Step: \"\"\"Base class for all specific", "str, acn: AC) -> int: if not StepWaitForBlock.name_to_id: blocks =", "datetime.timedelta) -> None: self.delay = delay self.finish: Optional[datetime.datetime] = None", "ac.Block) -> None: for acn in ACs.values(): if isinstance(acn, DanceAC):", "acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc = None acn.step_done() return result =", "def __init__(self, name: str, type_: str = 'VC') -> None:", "str, checker: Callable[[ac.Block], bool]) -> None: self.name = name self.checker", "track_is_occupied(block: ac.Block) -> bool: return bool(block['blockState']['state'] == 'occupied') class DanceAC(AC):", "from ac import ACs, AC JC = Dict[str, Any] class", "# type: ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block) -> None: for", "= datetime.datetime.now() + self.delay if datetime.datetime.now() > self.finish: self.finish =", "Dict[str, Any] class DanceStartException(Exception): pass class Step: \"\"\"Base class for", "-> None: for acn in ACs.values(): if isinstance(acn, DanceAC): acn.on_block_change(block)", "for executing user-defined dance.\"\"\" import logging from typing import Any,", "self.done() def step_done(self) -> None: logging.info(f'Step {self.stepi} done, ' f'going", "import ACs, AC JC = Dict[str, Any] class DanceStartException(Exception): pass", "None: AC.on_update(self) if not self.running(): return if self.stepi in self.steps:", "step.on_start(self) except DanceStartException as e: 
self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return", "step_done(self) -> None: logging.info(f'Step {self.stepi} done, ' f'going to step", "checker: Callable[[ac.Block], bool]) -> None: self.name = name self.checker =", "{self.stepi} done, ' f'going to step {self.stepi+1}...') self.stepi += 1", "= 0 def on_start(self) -> None: logging.info('Start') for stepi, step", "+= 1 self.send_step() self.on_update() def send_step(self) -> None: if self.stepi", "} if name not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name}", "name: str, type_: str = 'VC') -> None: self.jc: Optional[JC]", "blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']: block['id'] for block", "processed already, skip processing and continue. \"\"\" name_to_id: Dict[str, int]", "def __init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None: self.name", "is None: self.finish = datetime.datetime.now() + self.delay if datetime.datetime.now() >", "> self.finish: self.finish = None acn.step_done() def disp_str(self) -> str:", "self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc = None acn.step_done() return", "return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc = None", "None: if self.stepi in self.steps.keys(): if self.running(): description = self.steps[self.stepi].disp_str()", "acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']: block['id'] for block in blocks", "= acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc = None acn.step_done() def", "self.on_update() def on_stop(self) -> None: self.statestr = '' self.statestr_send() def", "for stepi, step in self.steps.items(): try: step.on_start(self) except DanceStartException as", "if self.block is None: blockid = self.get_block_id(self.name, acn) self.block =", "-> None: assert isinstance(acn, DanceAC) if self.block is None: blockid", 
"self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc = None", "self.jc = None acn.step_done() def on_start(self, acn: AC) -> None:", "in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self)", "in jcs if jc['type'] == self.type } if name not", "str, type_: str = 'VC') -> None: self.jc: Optional[JC] =", "result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc = None acn.step_done()", "krok: {self.stepi}: {description}' self.statestr_send() def on_block_change(self, block: ac.Block) -> None:", "f'Čekání na stav bloku {self.name}' def track_is_occupied(block: ac.Block) -> bool:", "jcid = self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc", "1 self.send_step() self.on_update() def send_step(self) -> None: if self.stepi in", "self.block is None: blockid = self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block']", "ac.Block) -> bool: return bool(block['blockState']['state'] == 'occupied') class DanceAC(AC): \"\"\"This", "self.on_update() def send_step(self) -> None: if self.stepi in self.steps.keys(): if", "datetime import ac import ac.blocks from ac import ACs, AC", "for jc in jcs if jc['type'] == self.type } if", "= delay self.finish: Optional[datetime.datetime] = None def update(self, acn: AC)", "def _on_block_change(block: ac.Block) -> None: for acn in ACs.values(): if", "{} def __init__(self, name: str, type_: str = 'VC') ->", "if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type: ignore", "AC) -> None: assert isinstance(acn, DanceAC) if self.jc is None:", "assert isinstance(acn, DanceAC) if self.jc is None: jcid = self.get_jc_id(self.name,", "assert isinstance(acn, 
DanceAC) if self.block is None or block['id'] !=", "ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block) -> None: for acn in", "is None: jcid = self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if", "bool(block['blockState']['state'] == 'occupied') class DanceAC(AC): \"\"\"This AC executes predefined steps.\"\"\"", "None: assert isinstance(acn, DanceAC) if self.block is None or block['id']", "Optional[datetime.datetime] = None def update(self, acn: AC) -> None: assert", "ac.blocks.register([self.block['id']]) def on_start(self, acn: AC) -> None: self.get_block_id(self.name, acn) def", "name: str, checker: Callable[[ac.Block], bool]) -> None: self.name = name", "not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return StepJC.name_to_id[name]", "AC) -> None: assert isinstance(acn, DanceAC) if self.block is None:", "raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name] def disp_str(self) -> str:", "acn: AC) -> int: if not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks']", "'' self.statestr_send() def on_update(self) -> None: AC.on_update(self) if not self.running():", "def __init__(self, delay: datetime.timedelta) -> None: self.delay = delay self.finish:", "AC) -> int: if not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id", "def update(self, acn: AC) -> None: assert isinstance(acn, DanceAC) if", "return StepWaitForBlock.name_to_id[name] def disp_str(self) -> str: return f'Čekání na stav", "-> int: if not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id =", "self.steps[self.stepi].on_block_change(self, block) # type: ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block) ->", "class for all specific dance steps.\"\"\" def update(self, acn: AC)", "Step]) -> None: AC.__init__(self, id_, password) self.steps = steps self.stepi", 
"self.get_block_id(self.name, acn) def on_block_change(self, acn: AC, block: ac.Block) -> None:", "= name self.checker = checker self.block: Optional[ac.Block] = None def", "{}) if result['success']: self.jc = None acn.step_done() def on_start(self, acn:", "-> None: self.statestr = '' self.statestr_send() def on_update(self) -> None:", "self.finish is None: self.finish = datetime.datetime.now() + self.delay if datetime.datetime.now()", "except DanceStartException as e: self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return self.stepi", "return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done() def get_block_id(self,", "AC executes predefined steps.\"\"\" def __init__(self, id_: str, password: str,", "below.\"\"\" name_to_id: Dict[str, int] = {} def __init__(self, name: str,", "None acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if result['success']: self.jc", "class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait for specific state of", "AC.on_update(self) if not self.running(): return if self.stepi in self.steps: self.steps[self.stepi].update(self)", "stav bloku {self.name}' def track_is_occupied(block: ac.Block) -> bool: return bool(block['blockState']['state']", "None: AC.__init__(self, id_, password) self.steps = steps self.stepi = 0", "on_stop(self) -> None: self.statestr = '' self.statestr_send() def on_update(self) ->", "acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']: jc['id'] for jc in jcs", "if self.stepi in self.steps.keys(): if self.running(): description = self.steps[self.stepi].disp_str() self.statestr", "== self.type } if name not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní", "= type_ self.name = name def update(self, acn: AC) ->", "== 'occupied') class DanceAC(AC): \"\"\"This AC executes predefined steps.\"\"\" def", "delay: datetime.timedelta) -> None: self.delay = delay self.finish: 
Optional[datetime.datetime] =", "StepWaitForBlock.name_to_id[name] def disp_str(self) -> str: return f'Čekání na stav bloku", "on_block_change(self, block: ac.Block) -> None: if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)):", "if self.stepi in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def step_done(self)", "f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait for specific", "class StepDelay(Step): \"\"\"Delay any time.\"\"\" def __init__(self, delay: datetime.timedelta) ->", "-> bool: return bool(block['blockState']['state'] == 'occupied') class DanceAC(AC): \"\"\"This AC", "def get_jc_id(self, name: str, acn: AC) -> int: if not", "specific dance steps.\"\"\" def update(self, acn: AC) -> None: pass", "datetime.datetime.now() + self.delay if datetime.datetime.now() > self.finish: self.finish = None", "logging.info('Start') for stepi, step in self.steps.items(): try: step.on_start(self) except DanceStartException", "stepi, step in self.steps.items(): try: step.on_start(self) except DanceStartException as e:", "self.stepi = 1 self.send_step() self.on_update() def on_stop(self) -> None: self.statestr", "None acn.step_done() def get_block_id(self, name: str, acn: AC) -> int:", "not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']: jc['id']", "password: str, steps: Dict[int, Step]) -> None: AC.__init__(self, id_, password)", "block: ac.Block) -> None: assert isinstance(acn, DanceAC) if self.block is", "from typing import Any, Dict, Optional, Callable import datetime import", "-> None: self.name = name self.checker = checker self.block: Optional[ac.Block]", "ac.Block) -> None: if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block)", "done, ' f'going to step {self.stepi+1}...') self.stepi += 1 self.send_step()", "= None 
acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self, acn: AC) ->", "-> None: if (self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) #", "= None acn.step_done() def on_start(self, acn: AC) -> None: self.get_jc_id(self.name,", "acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self, acn: AC) -> None: self.get_block_id(self.name,", "logging.info(f'Step {self.stepi} done, ' f'going to step {self.stepi+1}...') self.stepi +=", "executes predefined steps.\"\"\" def __init__(self, id_: str, password: str, steps:", "if not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']:", "Optional[JC] = None self.type = type_ self.name = name def", "ac.blocks from ac import ACs, AC JC = Dict[str, Any]", "def send_step(self) -> None: if self.stepi in self.steps.keys(): if self.running():", "type: ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block) -> None: for acn", "None self.type = type_ self.name = name def update(self, acn:", "self.send_step() self.on_update() def send_step(self) -> None: if self.stepi in self.steps.keys():", "update(self, acn: AC) -> None: assert isinstance(acn, DanceAC) if self.block", "= name def update(self, acn: AC) -> None: assert isinstance(acn,", "blocks } if name not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name}", "Optional, Callable import datetime import ac import ac.blocks from ac", "if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block = None acn.step_done() def get_block_id(self, name:", "def on_start(self) -> None: logging.info('Start') for stepi, step in self.steps.items():", "on_start(self) -> None: logging.info('Start') for stepi, step in self.steps.items(): try:", "if result['success']: self.jc = None acn.step_done() def on_start(self, acn: AC)", "str = 'VC') -> None: 
self.jc: Optional[JC] = None self.type", "step {self.stepi+1}...') self.stepi += 1 self.send_step() self.on_update() def send_step(self) ->", "self.running(): return if self.stepi in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done()", "def on_start(self, acn: AC) -> None: self.get_block_id(self.name, acn) def on_block_change(self,", "DanceAC) if self.block is None: blockid = self.get_block_id(self.name, acn) self.block", "Dict[str, int] = {} def __init__(self, name: str, type_: str", "-> None: assert isinstance(acn, DanceAC) if self.finish is None: self.finish", "-> None: AC.on_update(self) if not self.running(): return if self.stepi in", "str: return f'Čekání {self.delay}' class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait", "if self.jc is None: jcid = self.get_jc_id(self.name, acn) self.jc =", "not self.running(): return if self.stepi in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done')", "name_to_id: Dict[str, int] = {} def __init__(self, name: str, type_:", "delay self.finish: Optional[datetime.datetime] = None def update(self, acn: AC) ->", "def on_update(self) -> None: AC.on_update(self) if not self.running(): return if", "na stav bloku {self.name}' def track_is_occupied(block: ac.Block) -> bool: return", "jc['type'] == self.type } if name not in StepJC.name_to_id.keys(): raise", "of any block. See examples below.\"\"\" name_to_id: Dict[str, int] =", "{stepi}: '+str(e)) self.done() return self.stepi = 1 self.send_step() self.on_update() def", "typing import Any, Dict, Optional, Callable import datetime import ac", "-> str: return '' class JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\"", "disp_str(self) -> str: return f'Čekání na stav bloku {self.name}' def", "StepJC(Step): \"\"\" Process jc 'name'. 
If processed already, skip processing", "StepJC.name_to_id[name] def disp_str(self) -> str: return f'Stavění JC {self.name}' class", "AC) -> None: assert isinstance(acn, DanceAC) if self.finish is None:", "-> None: if self.stepi in self.steps.keys(): if self.running(): description =", "self.name = name self.checker = checker self.block: Optional[ac.Block] = None", "cesta {self.name} neexistuje!') return StepJC.name_to_id[name] def disp_str(self) -> str: return", "if name not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!')", "str, password: str, steps: Dict[int, Step]) -> None: AC.__init__(self, id_,", "assert isinstance(acn, DanceAC) if self.block is None: blockid = self.get_block_id(self.name,", "__init__(self, name: str, type_: str = 'VC') -> None: self.jc:", "e: self.disp_error(f'Krok {stepi}: '+str(e)) self.done() return self.stepi = 1 self.send_step()", "self.block: Optional[ac.Block] = None def update(self, acn: AC) -> None:", "state of any block. 
See examples below.\"\"\" name_to_id: Dict[str, int]", "self.statestr = '' self.statestr_send() def on_update(self) -> None: AC.on_update(self) if", "disp_str(self) -> str: return f'Stavění JC {self.name}' class StepDelay(Step): \"\"\"Delay", "assert isinstance(acn, DanceAC) if self.finish is None: self.finish = datetime.datetime.now()", "self.type } if name not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta", "self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def step_done(self) -> None: logging.info(f'Step {self.stepi}", "acn) def on_block_change(self, acn: AC, block: ac.Block) -> None: assert", "self.stepi in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def step_done(self) ->", "AC JC = Dict[str, Any] class DanceStartException(Exception): pass class Step:", "See examples below.\"\"\" name_to_id: Dict[str, int] = {} def __init__(self,", "'VC') -> None: self.jc: Optional[JC] = None self.type = type_", "and continue. \"\"\" name_to_id: Dict[str, int] = {} def __init__(self,", "\"\"\"Delay any time.\"\"\" def __init__(self, delay: datetime.timedelta) -> None: self.delay", "name not in StepWaitForBlock.name_to_id.keys(): raise BlockNotFoundException(f\"Blok {self.name} neexistuje!\") return StepWaitForBlock.name_to_id[name]", "None: assert isinstance(acn, DanceAC) if self.block is None: blockid =", "StepDelay(Step): \"\"\"Delay any time.\"\"\" def __init__(self, delay: datetime.timedelta) -> None:", "not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id = { block['name']: block['id']", "None acn.step_done() def on_start(self, acn: AC) -> None: self.get_jc_id(self.name, acn)", "{self.delay}' class BlockNotFoundException(DanceStartException): pass class StepWaitForBlock(Step): \"\"\"Wait for specific state", "StepWaitForBlock(Step): \"\"\"Wait for specific state of any block. 
See examples", "= { block['name']: block['id'] for block in blocks } if", "def on_stop(self) -> None: self.statestr = '' self.statestr_send() def on_update(self)", "jcs if jc['type'] == self.type } if name not in", "0 def on_start(self) -> None: logging.info('Start') for stepi, step in", "logging.info('Done') self.done() def step_done(self) -> None: logging.info(f'Step {self.stepi} done, '", "datetime.datetime.now() > self.finish: self.finish = None acn.step_done() def disp_str(self) ->", "self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def step_done(self) -> None: logging.info(f'Step", "dance steps.\"\"\" def update(self, acn: AC) -> None: pass def", "any block. See examples below.\"\"\" name_to_id: Dict[str, int] = {}", "bool: return bool(block['blockState']['state'] == 'occupied') class DanceAC(AC): \"\"\"This AC executes", "acn: AC) -> None: assert isinstance(acn, DanceAC) if self.jc is", "None: self.name = name self.checker = checker self.block: Optional[ac.Block] =", "disp_str(self) -> str: return '' class JCNotFoundException(DanceStartException): pass class StepJC(Step):", "or block['id'] != self.block['id']: return if self.checker(block): ac.blocks.unregister([self.block['id']]) self.block =", "on_start(self, acn: AC) -> None: self.get_jc_id(self.name, acn) def get_jc_id(self, name:", "StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return StepJC.name_to_id[name] def disp_str(self)", "self.checker = checker self.block: Optional[ac.Block] = None def update(self, acn:", "acn: AC) -> None: assert isinstance(acn, DanceAC) if self.block is", "def on_block_change(self, acn: AC, block: ac.Block) -> None: assert isinstance(acn,", "= self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc =", "self.jc is None: jcid = self.get_jc_id(self.name, acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc']", "for all 
specific dance steps.\"\"\" def update(self, acn: AC) ->", "'name'. If processed already, skip processing and continue. \"\"\" name_to_id:", "-> None: assert isinstance(acn, DanceAC) if self.block is None or", "Any, Dict, Optional, Callable import datetime import ac import ac.blocks", "acn.step_done() def on_start(self, acn: AC) -> None: self.get_jc_id(self.name, acn) def", "raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return StepJC.name_to_id[name] def disp_str(self) ->", "None: blockid = self.get_block_id(self.name, acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block):", "DanceStartException(Exception): pass class Step: \"\"\"Base class for all specific dance", "\"\"\"Wait for specific state of any block. See examples below.\"\"\"", "else: logging.info('Done') self.done() def step_done(self) -> None: logging.info(f'Step {self.stepi} done,", "StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']: jc['id'] for", "jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id = { jc['name']: jc['id'] for jc", "{ jc['name']: jc['id'] for jc in jcs if jc['type'] ==", "def get_block_id(self, name: str, acn: AC) -> int: if not", "\"\"\" name_to_id: Dict[str, int] = {} def __init__(self, name: str,", "f'Stavění JC {self.name}' class StepDelay(Step): \"\"\"Delay any time.\"\"\" def __init__(self,", "_on_block_change(block: ac.Block) -> None: for acn in ACs.values(): if isinstance(acn,", "acn) def get_jc_id(self, name: str, acn: AC) -> int: if", "if self.finish is None: self.finish = datetime.datetime.now() + self.delay if", "None: assert isinstance(acn, DanceAC) if self.finish is None: self.finish =", "checker self.block: Optional[ac.Block] = None def update(self, acn: AC) ->", "{self.stepi}: {description}' self.statestr_send() def on_block_change(self, block: ac.Block) -> None: if", "steps: Dict[int, Step]) -> None: AC.__init__(self, id_, password) self.steps =", "Callable import datetime 
import ac import ac.blocks from ac import", "get_block_id(self, name: str, acn: AC) -> int: if not StepWaitForBlock.name_to_id:", "-> None: pass def disp_str(self) -> str: return '' class", "= 'VC') -> None: self.jc: Optional[JC] = None self.type =", "on_start(self, acn: AC) -> None: self.get_block_id(self.name, acn) def on_block_change(self, acn:", "JC = Dict[str, Any] class DanceStartException(Exception): pass class Step: \"\"\"Base", "return if self.stepi in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def", "name not in StepJC.name_to_id.keys(): raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!') return", "{description}' self.statestr_send() def on_block_change(self, block: ac.Block) -> None: if (self.running()", "self.jc = None acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state', {}) if", "acn) self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc = None acn.step_done()", "AC) -> None: pass def on_start(self, acn: AC) -> None:", "user-defined dance.\"\"\" import logging from typing import Any, Dict, Optional,", "None: assert isinstance(acn, DanceAC) if self.jc is None: jcid =", "ac.Block) -> None: assert isinstance(acn, DanceAC) if self.block is None", "in self.steps: self.steps[self.stepi].update(self) else: logging.info('Done') self.done() def step_done(self) -> None:", "acn) self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block'] if self.checker(self.block): self.block = None acn.step_done()", "self.delay if datetime.datetime.now() > self.finish: self.finish = None acn.step_done() def", "JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\" Process jc 'name'. 
If processed", "int] = {} def __init__(self, name: str, checker: Callable[[ac.Block], bool])", "None: self.delay = delay self.finish: Optional[datetime.datetime] = None def update(self,", "return f'Čekání na stav bloku {self.name}' def track_is_occupied(block: ac.Block) ->", "str, acn: AC) -> int: if not StepJC.name_to_id: jcs =", "if jc['type'] == self.type } if name not in StepJC.name_to_id.keys():", "def on_block_change(self, block: ac.Block) -> None: if (self.running() and isinstance(self.steps[self.stepi],", "(self.running() and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type: ignore @ac.blocks.on_block_change()", "neexistuje!') return StepJC.name_to_id[name] def disp_str(self) -> str: return f'Stavění JC", "__init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None: self.name =", "None: self.jc: Optional[JC] = None self.type = type_ self.name =", "def disp_str(self) -> str: return '' class JCNotFoundException(DanceStartException): pass class", "'' class JCNotFoundException(DanceStartException): pass class StepJC(Step): \"\"\" Process jc 'name'.", "import logging from typing import Any, Dict, Optional, Callable import", "and isinstance(self.steps[self.stepi], StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type: ignore @ac.blocks.on_block_change() def", "StepWaitForBlock)): self.steps[self.stepi].on_block_change(self, block) # type: ignore @ac.blocks.on_block_change() def _on_block_change(block: ac.Block)", "class Step: \"\"\"Base class for all specific dance steps.\"\"\" def", "Any] class DanceStartException(Exception): pass class Step: \"\"\"Base class for all", "if self.jc['state']['active']: self.jc = None acn.step_done() return result = acn.pt_put(f'/jc/{self.jc[\"id\"]}/state',", "-> None: self.get_block_id(self.name, acn) def on_block_change(self, acn: AC, block: ac.Block)", "Dict, Optional, Callable import datetime import ac import ac.blocks from", 
"= checker self.block: Optional[ac.Block] = None def update(self, acn: AC)", "-> int: if not StepWaitForBlock.name_to_id: blocks = acn.pt_get('/blocks')['blocks'] StepWaitForBlock.name_to_id =", "dance.\"\"\" import logging from typing import Any, Dict, Optional, Callable", "int: if not StepJC.name_to_id: jcs = acn.pt_get('/jc')['jc'] StepJC.name_to_id = {", "= acn.pt_get(f'/jc/{jcid}?state=true')['jc'] if self.jc['state']['active']: self.jc = None acn.step_done() return result", "jc['id'] for jc in jcs if jc['type'] == self.type }", "self.stepi = 0 def on_start(self) -> None: logging.info('Start') for stepi,", "= None def update(self, acn: AC) -> None: assert isinstance(acn,", "DanceAC) if self.block is None or block['id'] != self.block['id']: return", "= {} def __init__(self, name: str, type_: str = 'VC')", "f'Aktuální krok: {self.stepi}: {description}' self.statestr_send() def on_block_change(self, block: ac.Block) ->", "import ac.blocks from ac import ACs, AC JC = Dict[str,", "step in self.steps.items(): try: step.on_start(self) except DanceStartException as e: self.disp_error(f'Krok", "None acn.step_done() else: ac.blocks.register([self.block['id']]) def on_start(self, acn: AC) -> None:", "self.finish = None acn.step_done() def disp_str(self) -> str: return f'Čekání", "' f'going to step {self.stepi+1}...') self.stepi += 1 self.send_step() self.on_update()", "id_: str, password: str, steps: Dict[int, Step]) -> None: AC.__init__(self," ]
[ "reported.\"\"\" def report(self, reason): \"\"\"Report this object to the moderators", "can be reported.\"\"\" def report(self, reason): \"\"\"Report this object to", "be reported.\"\"\" def report(self, reason): \"\"\"Report this object to the", "comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"], data={\"id\": self.fullname, \"reason\": reason} )", "<gh_stars>10-100 \"\"\"Provide the ReportableMixin class.\"\"\" from ....const import API_PATH class", "if ``reason`` is longer than 100 characters. Example usage: ..", ":param reason: The reason for reporting. Raises :class:`.APIException` if ``reason``", "this object to the moderators of its subreddit. :param reason:", "Raises :class:`.APIException` if ``reason`` is longer than 100 characters. Example", "of its subreddit. :param reason: The reason for reporting. Raises", "100 characters. Example usage: .. code-block:: python submission = reddit.submission(id='5or86n')", "submission.report('report reason') comment = reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"],", "API_PATH class ReportableMixin: \"\"\"Interface for RedditBase classes that can be", "reason: The reason for reporting. Raises :class:`.APIException` if ``reason`` is", "RedditBase classes that can be reported.\"\"\" def report(self, reason): \"\"\"Report", "its subreddit. :param reason: The reason for reporting. 
Raises :class:`.APIException`", "from ....const import API_PATH class ReportableMixin: \"\"\"Interface for RedditBase classes", "ReportableMixin: \"\"\"Interface for RedditBase classes that can be reported.\"\"\" def", "the ReportableMixin class.\"\"\" from ....const import API_PATH class ReportableMixin: \"\"\"Interface", "reddit.submission(id='5or86n') submission.report('report reason') comment = reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post(", "that can be reported.\"\"\" def report(self, reason): \"\"\"Report this object", "object to the moderators of its subreddit. :param reason: The", "characters. Example usage: .. code-block:: python submission = reddit.submission(id='5or86n') submission.report('report", "for reporting. Raises :class:`.APIException` if ``reason`` is longer than 100", "code-block:: python submission = reddit.submission(id='5or86n') submission.report('report reason') comment = reddit.comment(id='dxolpyc')", "for RedditBase classes that can be reported.\"\"\" def report(self, reason):", "reason for reporting. Raises :class:`.APIException` if ``reason`` is longer than", "subreddit. :param reason: The reason for reporting. Raises :class:`.APIException` if", "moderators of its subreddit. :param reason: The reason for reporting.", ":class:`.APIException` if ``reason`` is longer than 100 characters. Example usage:", "reason): \"\"\"Report this object to the moderators of its subreddit.", "= reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"], data={\"id\": self.fullname, \"reason\":", "reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"], data={\"id\": self.fullname, \"reason\": reason}", "the moderators of its subreddit. 
:param reason: The reason for", "\"\"\"Provide the ReportableMixin class.\"\"\" from ....const import API_PATH class ReportableMixin:", "reason') comment = reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"], data={\"id\":", "submission = reddit.submission(id='5or86n') submission.report('report reason') comment = reddit.comment(id='dxolpyc') comment.report('report reason')", "The reason for reporting. Raises :class:`.APIException` if ``reason`` is longer", "``reason`` is longer than 100 characters. Example usage: .. code-block::", "= reddit.submission(id='5or86n') submission.report('report reason') comment = reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\"", "than 100 characters. Example usage: .. code-block:: python submission =", "class.\"\"\" from ....const import API_PATH class ReportableMixin: \"\"\"Interface for RedditBase", "ReportableMixin class.\"\"\" from ....const import API_PATH class ReportableMixin: \"\"\"Interface for", "python submission = reddit.submission(id='5or86n') submission.report('report reason') comment = reddit.comment(id='dxolpyc') comment.report('report", "to the moderators of its subreddit. :param reason: The reason", "is longer than 100 characters. Example usage: .. code-block:: python", "comment = reddit.comment(id='dxolpyc') comment.report('report reason') \"\"\" self._reddit.post( API_PATH[\"report\"], data={\"id\": self.fullname,", "class ReportableMixin: \"\"\"Interface for RedditBase classes that can be reported.\"\"\"", "def report(self, reason): \"\"\"Report this object to the moderators of", "import API_PATH class ReportableMixin: \"\"\"Interface for RedditBase classes that can", "usage: .. 
code-block:: python submission = reddit.submission(id='5or86n') submission.report('report reason') comment", "\"\"\"Interface for RedditBase classes that can be reported.\"\"\" def report(self,", "report(self, reason): \"\"\"Report this object to the moderators of its", "longer than 100 characters. Example usage: .. code-block:: python submission", ".. code-block:: python submission = reddit.submission(id='5or86n') submission.report('report reason') comment =", "classes that can be reported.\"\"\" def report(self, reason): \"\"\"Report this", "reporting. Raises :class:`.APIException` if ``reason`` is longer than 100 characters.", "Example usage: .. code-block:: python submission = reddit.submission(id='5or86n') submission.report('report reason')", "\"\"\"Report this object to the moderators of its subreddit. :param", "....const import API_PATH class ReportableMixin: \"\"\"Interface for RedditBase classes that" ]
[ "= ToPILImage()(im) savepath = BytesIO() im.save(savepath, 'JPEG', quality=75) im =", "BytesIO() im.save(savepath, 'JPEG', quality=75) im = Image.open(savepath) im = ToTensor()(im)", "_jpeg_compression(im): assert torch.is_tensor(im) im = ToPILImage()(im) savepath = BytesIO() im.save(savepath,", "torch.is_tensor(im) im = ToPILImage()(im) savepath = BytesIO() im.save(savepath, 'JPEG', quality=75)", "ToPILImage()(im) savepath = BytesIO() im.save(savepath, 'JPEG', quality=75) im = Image.open(savepath)", "def _jpeg_compression(im): assert torch.is_tensor(im) im = ToPILImage()(im) savepath = BytesIO()", "= BytesIO() im.save(savepath, 'JPEG', quality=75) im = Image.open(savepath) im =", "'JPEG', quality=75) im = Image.open(savepath) im = ToTensor()(im) return im", "im.save(savepath, 'JPEG', quality=75) im = Image.open(savepath) im = ToTensor()(im) return", "assert torch.is_tensor(im) im = ToPILImage()(im) savepath = BytesIO() im.save(savepath, 'JPEG',", "savepath = BytesIO() im.save(savepath, 'JPEG', quality=75) im = Image.open(savepath) im", "im = ToPILImage()(im) savepath = BytesIO() im.save(savepath, 'JPEG', quality=75) im" ]
[ "@interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration", "as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in", "with open(str(self.file_path), 'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config)", "'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet", "get('FileSystemDir')['directory'] for d, dirs, files in os.walk(base_path): for f in", "component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else:", "self.config = config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for", "= os.path.join(d, f) if not os.path.isfile(path): continue #get interface-assigned string", "Factory from sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object):", "\"Unicode file at location {}\".format(self.file_path) def __iter__(self): _end = 0", "= file_path self.config = config def __str__(self): return \"byte file", "at location {}\".format(self.file_path) def __iter__(self): _end = 0 _buffer =", "location {}\".format(self.file_path) def __iter__(self): _end = 0 _buffer = collections.deque()", "os.path from zope import component from zope import interface from", "def __init__(self, file_path, config): self.file_path = file_path self.config = config", "@interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path", "from 
zope.component.factory import Factory from sparc.configuration import container import mellon", "mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init", "{}\".format(self.file_path) def __iter__(self): _end = 0 _buffer = collections.deque() _eof_buffer", "= collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path), 'rU') as stream:", "_eof_buffer = collections.deque() with open(str(self.file_path), 'rU') as stream: file_ =", "stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in file_:", "self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig)", "__init__(self, config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and", "entries. \"\"\" self.config = config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\", "0 _buffer = collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path), 'rU')", "def __iter__(self): with open(self.file_path, 'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream',", "component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory", "with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. 
\"\"\" self.config = config def", "collections.deque() with open(str(self.file_path), 'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream,", "open(self.file_path, 'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for", "__init__(self, file_path, config): self.file_path = file_path self.config = config def", "not os.path.isfile(path): continue #get interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path',", "class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config", "d, dirs, files in os.walk(base_path): for f in files: path", "(IPath) path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file',", "import os.path from zope import component from zope import interface", "__iter__(self): _end = 0 _buffer = collections.deque() _eof_buffer = collections.deque()", "= 0 _buffer = collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path),", "at location {}\".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb') as stream:", "path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path,", "stream, self.config) for snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory =", "{}\".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb') as stream: file_ =", "= collections.deque() with open(str(self.file_path), 'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream',", "yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class 
MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self,", "= config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d,", "file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def", "return \"Unicode file at location {}\".format(self.file_path) def __iter__(self): _end =", "MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config =", "_end = 0 _buffer = collections.deque() _eof_buffer = collections.deque() with", "zope import interface from zope.component.factory import Factory from sparc.configuration import", "mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. \"\"\" self.config = config def __iter__(self):", "snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config):", "Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. 
\"\"\"", "interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield", "self.config = config def __str__(self): return \"byte file at location", "self.config) for snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig)", "string (IPath) path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\", "file_path, config): self.file_path = file_path self.config = config def __str__(self):", "files: path = os.path.join(d, f) if not os.path.isfile(path): continue #get", "\"\"\" self.config = config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory']", "snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class", "collections import os.path from zope import component from zope import", "zope import component from zope import interface from zope.component.factory import", "return \"byte file at location {}\".format(self.file_path) def __iter__(self): with open(self.file_path,", "def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs, files", "self.file_path = file_path self.config = config def __str__(self): return \"Unicode", "__iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs, files in", "if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\\", "dirs, files in os.walk(base_path): for f in files: path =", "__str__(self): return \"Unicode file at location {}\".format(self.file_path) def __iter__(self): _end", 
"<reponame>LaudateCorpus1/mellon import collections import os.path from zope import component from", "for snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider)", "location {}\".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb') as stream: file_", "and mellon[configure.yaml:MellonSnippet] entries. \"\"\" self.config = config def __iter__(self): base_path", "in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object):", "= container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs, files in os.walk(base_path): for", "as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in", "file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in file_: yield", "continue #get interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path', path) if", "__str__(self): return \"byte file at location {}\".format(self.file_path) def __iter__(self): with", "#get interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check():", "for f in files: path = os.path.join(d, f) if not", "interface from zope.component.factory import Factory from sparc.configuration import container import", "else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig) interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory,", "def __str__(self): return \"Unicode file at location {}\".format(self.file_path) def 
__iter__(self):", "stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in file_:", "config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs,", "file at location {}\".format(self.file_path) def __iter__(self): _end = 0 _buffer", "u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory", "class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider", "Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init Args: config:", "MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config =", "import Factory from sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile) class", "stream, self.config) for snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory =", "config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet]", "file_path self.config = config def __str__(self): return \"byte file at", "path, self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory =", "component from zope import interface from zope.component.factory import Factory from", "provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and 
mellon[configure.yaml:MellonSnippet] entries. \"\"\" self.config = config", "if not os.path.isfile(path): continue #get interface-assigned string (IPath) path =", "collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path), 'rU') as stream: file_", "'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet", "zope.component.factory import Factory from sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile)", "open(str(self.file_path), 'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for", "base_path = container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs, files in os.walk(base_path):", "self.config) for snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig)", "path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield", "yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig) interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)", "= component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in file_: yield snippet", "import collections import os.path from zope import component from zope", "os.path.join(d, f) if not os.path.isfile(path): continue #get interface-assigned string (IPath)", "import component from zope import interface from zope.component.factory import Factory", "container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config):", "snippet 
mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path,", "class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config", "mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file',", "= component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config)", "= component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in file_: yield snippet", "= config def __str__(self): return \"Unicode file at location {}\".format(self.file_path)", "for snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile)", "from zope import component from zope import interface from zope.component.factory", "in files: path = os.path.join(d, f) if not os.path.isfile(path): continue", "with open(self.file_path, 'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config)", "config def __str__(self): return \"byte file at location {}\".format(self.file_path) def", "import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path", "def __iter__(self): _end = 0 _buffer = collections.deque() _eof_buffer =", "f in files: path = os.path.join(d, f) if not os.path.isfile(path):", "f) if not os.path.isfile(path): continue #get interface-assigned string (IPath) path", "mellon 
@interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path =", "file at location {}\".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb') as", "MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with", "sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. \"\"\" self.config =", "def __init__(self, config): \"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir]", "path = os.path.join(d, f) if not os.path.isfile(path): continue #get interface-assigned", "os.path.isfile(path): continue #get interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path', path)", "\"byte file at location {}\".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb')", "file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in file_: yield", "= file_path self.config = config def __str__(self): return \"Unicode file", "in os.walk(base_path): for f in files: path = os.path.join(d, f)", "container.IPyContainerConfigValue(self.config).\\ get('FileSystemDir')['directory'] for d, dirs, files in os.walk(base_path): for f", "import interface from zope.component.factory import Factory from sparc.configuration import container", "= config def __str__(self): return \"byte file at location {}\".format(self.file_path)", "snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class", "from zope import interface from 
zope.component.factory import Factory from sparc.configuration", "Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path =", "mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config):", "@interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path", "self.config = config def __str__(self): return \"Unicode file at location", "yield component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path,", "sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self,", "files in os.walk(base_path): for f in files: path = os.path.join(d,", "component.createObject(\\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\\ u'mellon.factories.filesystem.unicode_file', path, self.config)", "file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def", "_buffer = collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path), 'rU') as", "config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. \"\"\" self.config", "mellon[configure.yaml:MellonSnippet] entries. 
\"\"\" self.config = config def __iter__(self): base_path =", "file_path self.config = config def __str__(self): return \"Unicode file at", "__iter__(self): with open(self.file_path, 'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream,", "from sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def", "os.walk(base_path): for f in files: path = os.path.join(d, f) if", "import container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path,", "component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory", "= Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): \"\"\"Init Args:", "config def __str__(self): return \"Unicode file at location {}\".format(self.file_path) def", "yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self,", "config): self.file_path = file_path self.config = config def __str__(self): return", "= Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path", "\"\"\"Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries.", "in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = 
Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object):", "for d, dirs, files in os.walk(base_path): for f in files:", "def __str__(self): return \"byte file at location {}\".format(self.file_path) def __iter__(self):", "self.file_path = file_path self.config = config def __str__(self): return \"byte" ]
[ "'.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ +", "'.helen', 'Helen') Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ +", "'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface',", "__name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba',", "'.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ +", "+ '.imagenet', 'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats',", "'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet',", "Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ + '.helen', 'Helen')", "'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba',", "'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq',", "'Helen') Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face',", "__name__ + '.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq',", "'.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ +", 
"'.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ +", "Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA')", "+ '.helen', 'Helen') Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__", "<gh_stars>1-10 \"\"\"Predefined Datasources. \"\"\" # toolbox imports from ...datasource import", "__name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen',", "Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M')", "Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ')", "__name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned',", "import Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet', section='val') # section='train'", "'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba',", "+ '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA', aligned=True) Datasource.register_class('WiderFace',", "Datasource.register_instance('Helen', __name__ + '.helen', 'Helen') Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild')", "'.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ + '.helen', 'Helen') Datasource.register_instance('lfw', __name__ +", "section='val') # section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 
'DogsAndCats') Datasource.register_instance('widerface', __name__", "from ...datasource import Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet', section='val')", "'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface',", "'.imagenet', 'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 'DogsAndCats')", "Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace')", "__name__ + '.imagenet', 'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats', __name__ +", "Datasources. \"\"\" # toolbox imports from ...datasource import Datasource Datasource.register_instance('imagenet-val',", "Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA',", "'FGNet') Datasource.register_instance('Helen', __name__ + '.helen', 'Helen') Datasource.register_instance('lfw', __name__ + '.lfw',", "'.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ +", "__name__ + '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet',", "'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ + '.helen',", "__name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M') Datasource.register_instance('5celeb',", "'.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M') 
Datasource.register_instance('5celeb', __name__ +", "Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA', aligned=True) Datasource.register_class('WiderFace', __name__ + '.widerface')", "'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA', aligned=True) Datasource.register_class('WiderFace', __name__ +", "+ '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ + '.helen', 'Helen') Datasource.register_instance('lfw', __name__", "+ '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__", "Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace')", "+ '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__", "# toolbox imports from ...datasource import Datasource Datasource.register_instance('imagenet-val', __name__ +", "section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface',", "+ '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ') Datasource.register_instance('celeba', __name__", "toolbox imports from ...datasource import Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet',", "__name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__ + '.helen', 'Helen') Datasource.register_instance('lfw',", "\"\"\" # toolbox imports from ...datasource import Datasource Datasource.register_instance('imagenet-val', __name__", "# section='train' Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ +", 
"Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats', __name__", "__name__ + '.helen', 'Helen') Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild') Datasource.register_instance('ms-celeb-1m',", "__name__ + '.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA', aligned=True)", "\"\"\"Predefined Datasources. \"\"\" # toolbox imports from ...datasource import Datasource", "Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet')", "'.celeba', 'CelebA') Datasource.register_instance('celeba-aligned', __name__ + '.celeba', 'CelebA', aligned=True) Datasource.register_class('WiderFace', __name__", "...datasource import Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet', section='val') #", "+ '.dogsandcats', 'DogsAndCats') Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__", "+ '.face', 'MSCeleb1M') Datasource.register_instance('5celeb', __name__ + '.fivecelebface', 'FiveCelebFace') Datasource.register_instance('ffhq', __name__", "+ '.widerface', 'WiderFace') Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet') Datasource.register_instance('Helen', __name__", "imports from ...datasource import Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet',", "Datasource Datasource.register_instance('imagenet-val', __name__ + '.imagenet', 'ImageNet', section='val') # section='train' Datasource.register_instance('dogsandcats'," ]
[ "[251, 501, 751, 1001, 1251, 1501, 1751, 2001, 2251, 2501,", "0.9359999999999999, 0.924, 0.944, 0.944, 0.948, 0.888, 0.868, 0.86, 0.888, 0.9,", "0.976, 0.968, 0.952, 0.896, 0.844, 0.86, 0.908, 0.976, 0.948, 0.916,", "2501, 2751, 3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914,", "2001, 2251, 2501, 2751, 3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88,", "0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046,", "3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886, 0.89, 0.908,", "0.92, 0.896, 0.888, 0.928, 0.94, 0.96, 0.948, 0.976, 0.976, 0.968,", "0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046, 0.9077377632214376, 0.9325670957791264, 0.919398127767968,", "<reponame>babinyurii/RECAN # -*- coding: utf-8 -*- \"\"\" Created on Tue", "0.924, 0.94, 0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864,", "Tue Oct 22 15:58:44 2019 @author: babin \"\"\" posits_def =", "0.924, 0.944, 0.944, 0.948, 0.888, 0.868, 0.86, 0.888, 0.9, 0.908,", "0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988,", "[0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646,", "1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215] dist_whole_align_ref", "0.886, 0.89, 0.908, 0.938, 0.948, 0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186],", "1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215]", "0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984, 0.98, 0.98, 0.98,", "0.9, 0.908, 0.88, 0.916, 
0.924, 0.94, 0.96, 0.948, 0.9319999999999999, 0.944,", "0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92, 0.94, 0.964, 0.95,", "[0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046, 0.9077377632214376, 0.9325670957791264,", "0.888, 0.928, 0.94, 0.96, 0.948, 0.976, 0.976, 0.968, 0.952, 0.896,", "0.948, 0.976, 0.976, 0.968, 0.952, 0.896, 0.844, 0.86, 0.908, 0.976,", "0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92,", "0.988, 0.984, 0.98, 0.98, 0.98, 0.92, 0.896, 0.888, 0.928, 0.94,", "babin \"\"\" posits_def = [251, 501, 751, 1001, 1251, 1501,", "0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92, 0.94, 0.964,", "0.896, 0.844, 0.86, 0.908, 0.976, 0.948, 0.916, 0.904, 0.9359999999999999, 0.948,", "0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017,", "0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p =", "0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87,", "0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423,", "0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885,", "0.92, 0.94, 0.964, 0.95, 0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]}", "0.914, 0.886, 0.89, 0.908, 0.938, 
0.948, 0.948, 0.886, 0.852, 0.8580645161290322,", "0.88, 0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984,", "0.952, 0.896, 0.844, 0.86, 0.908, 0.976, 0.948, 0.916, 0.904, 0.9359999999999999,", "'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046, 0.9077377632214376,", "0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616,", "0.94, 0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001,", "= {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328,", "1751, 2001, 2251, 2501, 2751, 3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_':", "coding: utf-8 -*- \"\"\" Created on Tue Oct 22 15:58:44", "0.95, 0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_':", "0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj':", "Oct 22 15:58:44 2019 @author: babin \"\"\" posits_def = [251,", "0.944, 0.944, 0.948, 0.888, 0.868, 0.86, 0.888, 0.9, 0.908, 0.88,", "{'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924, 0.944, 0.944, 0.948, 0.888, 0.868,", "0.968, 0.952, 0.896, 0.844, 0.86, 0.908, 0.976, 0.948, 0.916, 0.904,", "0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001, 0.88, 0.892, 0.88, 0.844, 0.827906976744186,", "0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 
0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444,", "0.9319999999999999, 0.864, 0.8200000000000001, 0.88, 0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333],", "'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92, 0.94, 0.964, 0.95, 0.892, 0.914,", "0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001, 0.88, 0.892,", "dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886, 0.89, 0.908, 0.938,", "0.984, 0.988, 0.984, 0.98, 0.98, 0.98, 0.92, 0.896, 0.888, 0.928,", "0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924, 0.944, 0.944,", "@author: babin \"\"\" posits_def = [251, 501, 751, 1001, 1251,", "0.844, 0.86, 0.908, 0.976, 0.948, 0.916, 0.904, 0.9359999999999999, 0.948, 0.94,", "0.86, 0.908, 0.976, 0.948, 0.916, 0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999,", "= [251, 501, 751, 1001, 1251, 1501, 1751, 2001, 2251,", "{'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886, 0.89, 0.908, 0.938, 0.948, 0.948,", "0.896, 0.888, 0.928, 0.94, 0.96, 0.948, 0.976, 0.976, 0.968, 0.952,", "0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92, 0.94, 0.964, 0.95, 0.892,", "2019 @author: babin \"\"\" posits_def = [251, 501, 751, 1001,", "dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924, 0.944, 0.944, 0.948,", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Oct", "0.98, 0.98, 0.92, 0.896, 0.888, 0.928, 0.94, 0.96, 0.948, 0.976,", "2751, 3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886,", "-*- coding: utf-8 -*- \"\"\" Created on Tue Oct 22", "0.868, 0.86, 0.888, 0.9, 0.908, 0.88, 0.916, 0.924, 0.94, 0.96,", "0.94, 0.964, 0.95, 0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref", "0.96, 0.9319999999999999, 0.864, 
0.8200000000000001, 0.88, 0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913,", "posits_def = [251, 501, 751, 1001, 1251, 1501, 1751, 2001,", "0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984, 0.98, 0.98, 0.98, 0.92,", "0.9, 0.9359999999999999, 0.924, 0.944, 0.944, 0.948, 0.888, 0.868, 0.86, 0.888,", "0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924,", "0.96, 0.948, 0.976, 0.976, 0.968, 0.952, 0.896, 0.844, 0.86, 0.908,", "0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889,", "0.944, 0.948, 0.888, 0.868, 0.86, 0.888, 0.9, 0.908, 0.88, 0.916,", "0.916, 0.924, 0.94, 0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999,", "{'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927,", "0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037, 0.9150569322625627,", "\"\"\" posits_def = [251, 501, 751, 1001, 1251, 1501, 1751,", "15:58:44 2019 @author: babin \"\"\" posits_def = [251, 501, 751,", "[0.87, 0.9, 0.9359999999999999, 0.924, 0.944, 0.944, 0.948, 0.888, 0.868, 0.86,", "-*- \"\"\" Created on Tue Oct 22 15:58:44 2019 @author:", "0.946, 0.92, 0.94, 0.964, 0.95, 0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742,", "751, 1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001,", "0.986, 0.946, 0.92, 0.94, 0.964, 0.95, 0.892, 0.914, 0.9359999999999999, 0.924,", "0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 
0.841497348083017, 0.8033200314745574],", "= {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886, 0.89, 0.908, 0.938, 0.948,", "0.98, 0.98, 0.98, 0.92, 0.896, 0.888, 0.928, 0.94, 0.96, 0.948,", "0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946, 0.92, 0.94,", "0.94, 0.96, 0.948, 0.976, 0.976, 0.968, 0.952, 0.896, 0.844, 0.86,", "0.964, 0.95, 0.892, 0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref =", "0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984, 0.98,", "0.9438329817983037, 0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046, 0.9077377632214376, 0.9325670957791264, 0.919398127767968, 0.9323907045444492,", "0.948, 0.916, 0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]}", "0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984,", "0.938, 0.948, 0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986,", "0.864, 0.8200000000000001, 0.88, 0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj':", "0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_':", "0.948, 0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968, 0.986, 0.946,", "0.948, 0.888, 0.868, 0.86, 0.888, 0.9, 0.908, 0.88, 0.916, 0.924,", "0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992,", "22 15:58:44 2019 @author: babin \"\"\" posits_def = [251, 501,", "0.86, 0.888, 0.9, 0.908, 0.88, 0.916, 0.924, 0.94, 0.96, 
0.948,", "[0.968, 0.986, 0.946, 0.92, 0.94, 0.964, 0.95, 0.892, 0.914, 0.9359999999999999,", "0.888, 0.868, 0.86, 0.888, 0.9, 0.908, 0.88, 0.916, 0.924, 0.94,", "0.888, 0.9, 0.908, 0.88, 0.916, 0.924, 0.94, 0.96, 0.948, 0.9319999999999999,", "utf-8 -*- \"\"\" Created on Tue Oct 22 15:58:44 2019", "0.88, 0.916, 0.924, 0.94, 0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96,", "\"\"\" Created on Tue Oct 22 15:58:44 2019 @author: babin", "0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984, 0.98, 0.98,", "[0.95, 0.984, 0.988, 0.984, 0.98, 0.98, 0.98, 0.92, 0.896, 0.888,", "2251, 2501, 2751, 3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938,", "0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422,", "0.908, 0.976, 0.948, 0.916, 0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372,", "[0.88, 0.938, 0.914, 0.886, 0.89, 0.908, 0.938, 0.948, 0.948, 0.886,", "0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001, 0.88,", "0.9150569322625627, 0.9372918193486423, 0.9630251291666885, 0.9481456308045444, 0.8823622232289046, 0.9077377632214376, 0.9325670957791264, 0.919398127767968, 0.9323907045444492, 0.9211964811945209]}", "0.8200000000000001, 0.88, 0.892, 0.88, 0.844, 0.827906976744186, 0.8608695652173913, 0.9333333333333333], 'AB010291.1_Bj': [0.95,", "0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318,", "3001, 3215] dist_whole_align_ref = {'AB048704.1_genotype_C_': [0.88, 0.938, 0.914, 0.886, 0.89,", "1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215] dist_whole_align_ref =", "0.935483870967742, 
0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924, 0.944,", "0.89, 0.908, 0.938, 0.948, 0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj':", "0.908, 0.88, 0.916, 0.924, 0.94, 0.96, 0.948, 0.9319999999999999, 0.944, 0.9359999999999999,", "0.976, 0.976, 0.968, 0.952, 0.896, 0.844, 0.86, 0.908, 0.976, 0.948,", "0.914, 0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9,", "0.984, 0.98, 0.98, 0.98, 0.92, 0.896, 0.888, 0.928, 0.94, 0.96,", "0.976, 0.948, 0.916, 0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826,", "0.938, 0.914, 0.886, 0.89, 0.908, 0.938, 0.948, 0.948, 0.886, 0.852,", "'AB010291.1_Bj': [0.95, 0.984, 0.988, 0.984, 0.98, 0.98, 0.98, 0.92, 0.896,", "dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567, 0.9351297624958606,", "0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001, 0.88, 0.892, 0.88, 0.844,", "0.9359999999999999, 0.924, 0.935483870967742, 0.9255813953488372]} dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999,", "0.9015597329057567, 0.9351297624958606, 0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109,", "0.9319999999999999, 0.944, 0.9359999999999999, 0.96, 0.9319999999999999, 0.864, 0.8200000000000001, 0.88, 0.892, 0.88,", "= {'AB048704.1_genotype_C_': [0.87, 0.9, 0.9359999999999999, 0.924, 0.944, 0.944, 0.948, 0.888,", "0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438,", "0.928, 
0.94, 0.96, 0.948, 0.976, 0.976, 0.968, 0.952, 0.896, 0.844,", "0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077, 0.879929128403318, 0.9015597329057567,", "Created on Tue Oct 22 15:58:44 2019 @author: babin \"\"\"", "501, 751, 1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751,", "0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_': [0.8681719101219889, 0.9351731626008992, 0.9083728156043438, 0.8750271283550077,", "0.98, 0.92, 0.896, 0.888, 0.928, 0.94, 0.96, 0.948, 0.976, 0.976,", "0.916, 0.904, 0.9359999999999999, 0.948, 0.94, 0.9359999999999999, 0.9255813953488372, 0.9217391304347826, 0.8666666666666667]} dist_whole_align_def_params_k2p", "0.908, 0.938, 0.948, 0.948, 0.886, 0.852, 0.8580645161290322, 0.827906976744186], 'AB010291.1_Bj': [0.968,", "0.9459250442159328, 0.9459717143364927, 0.8760802380420646, 0.8343273948904422, 0.841497348083017, 0.8033200314745574], 'AB010291.1_Bj': [0.9671530980992109, 0.9858456107911616, 0.9438329817983037,", "on Tue Oct 22 15:58:44 2019 @author: babin \"\"\" posits_def" ]
[ "if(train_per + dev_per + test_per > 1): print \"Train Dev", "X_neg]) y = np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples) self.split =", "linewidth=2) axis.legend() # fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per +", "Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2=", "= np.nonzero(self.train_y == 0) idx2,_ = np.nonzero(self.train_y == 1) idx3,_", "train_y self.dev_X = dev_X self.dev_y = dev_y self.test_X = test_X", "self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params = np.zeros((3,2)) p1 = self.balance", "> 1): print \"Train Dev Test split should sum to", "class self.variance1 = g1[1] # self.variance2 = g2[1] self.balance =", "= train_y self.dev_X = dev_X self.dev_y = dev_y self.test_X =", "Mean2= (%.2f,%.2f) Var2= %.2f \\nNr. Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1],", "dev_y = np.array([]) train_X = X[0:split1,:] dev_X = np.array([]) test_X", "= nr_examples - nr_positive # number of examples of \"negative\"", "split1 = int(dim*train_per) if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1]) dev_y =", "np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1])", "Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2])", "self.mean1 = g1[0] # mean of positive class self.mean2 =", "import matplotlib.pyplot as plt fig = plt.figure() fig.suptitle(self.get_name()) axis =", "self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params =", "= np.array([]) test_X = X[split1:,:] else: split2 = 
int(dim*(train_per+dev_per)) print", "dataset for visualization purposes. The date set contains points from", "bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() # fig.show() return", "= y.shape[0] split1 = int(dim*train_per) if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1])", "plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True): import matplotlib.pyplot as plt fig", "Dev Test split should sum to one\" return dim =", "y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg])", "= np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg =", "= int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X = X[0:split1,:]", "x_min = np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype = \"float\") y_star =", "X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X", "# number of examples of \"positive\" class nr_negative = nr_examples", "sum to one\" return dim = y.shape[0] split1 = int(dim*train_per)", "np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg]) y = np.vstack([y_pos, y_neg]) perm", "train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X = X[split1:split2,:] test_X", "params[1,0] = 1.0/self.variance1 * self.mean1[0] params[2,0] = 1.0/self.variance1 * self.mean1[1]", "with mean u_i and std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2", "g2[0] # mean of negative class self.variance1 = g1[1] #", "as plt fig = plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_", "test_per > 1): print \"Train Dev Test split should sum", "= np.nonzero(self.train_y == 1) idx3,_ = np.nonzero(self.test_y == 0) 
idx4,_", "get_name(self): return \"Simple Data Set -- Mean1= (%.2f,%.2f) Var1 =", "= [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance # number of examples of", "negative class self.variance1 = g1[1] # self.variance2 = g2[1] self.balance", "visualization purposes. The date set contains points from two gaussians", "if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() # fig.show()", "g2[1] self.balance = balance self.nr_points = nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1])", "= np.vsplit(y,[split1]) dev_y = np.array([]) train_X = X[0:split1,:] dev_X =", "return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per + test_per >", "== 0) idx2,_ = np.nonzero(self.train_y == 1) idx3,_ = np.nonzero(self.test_y", "= np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg]) y", "axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o')", "y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name,", "Plot Bayes optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\")", "= balance self.nr_points = nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 =", "0) idx2,_ = np.nonzero(self.train_y == 1) idx3,_ = np.nonzero(self.test_y ==", "fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y == 0) idx2,_ = np.nonzero(self.train_y ==", "1): print 
\"Train Dev Test split should sum to one\"", "if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1]) dev_y = np.array([]) train_X =", "if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal if(print_bayes_opt): bayes_opt_params", "split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per + test_per > 1): print \"Train", "and \"negative\". # Each class follows a Gaussian distribution. class", "np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X = X[split1:split2,:] test_X = X[split2:,:]", "np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int)", "[[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance # number of examples of \"positive\"", "self.train_X = train_X self.train_y = train_y self.dev_X = dev_X self.dev_y", "> 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes", "mean u_i and std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 =", "def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per + test_per > 1): print", "X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X", "= split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y = train_y self.dev_X =", "Bayes optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend()", "1.0/self.variance2 * self.mean2[1] print params return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\",", "1.0/self.variance1 * self.mean1[0] params[2,0] = 
1.0/self.variance1 * self.mean1[1] params[1,1] =", "self.variance2 = g2[1] self.balance = balance self.nr_points = nr_examples X_pos_1", "* self.mean2[0] params[2,1] = 1.0/self.variance2 * self.mean2[1] print params return", "= dev_X self.dev_y = dev_y self.test_X = test_X self.test_y =", "classes, \"positive\" and \"negative\". # Each class follows a Gaussian", "+ dev_per + test_per > 1): print \"Train Dev Test", "X[split1:,:] else: split2 = int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2))", "= 1.0 - self.balance params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) +", "= plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y ==", "np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype = \"float\") y_star = ((params[1,1]-params[1,0])*x +", "train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y = train_y self.dev_X", "-1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2)", "split should sum to one\" return dim = y.shape[0] split1", "def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True): import matplotlib.pyplot as plt", "= -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] = 1.0/self.variance1 *", "np.array([]) test_X = X[split1:,:] else: split2 = int(dim*(train_per+dev_per)) print split2", "dev_y self.test_X = test_X self.test_y = test_y def get_name(self): return", "self.balance = balance self.nr_points = nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2", "with two classes, \"positive\" and \"negative\". 
# Each class follows", "dim = y.shape[0] split1 = int(dim*train_per) if(dev_per ==0): train_y,test_y =", "test_X = X[split1:,:] else: split2 = int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y", "True): import matplotlib.pyplot as plt fig = plt.figure() fig.suptitle(self.get_name()) axis", "self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params = np.zeros((3,2)) p1 =", "int(dim*train_per) if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1]) dev_y = np.array([]) train_X", "A simple two dimentional dataset for visualization purposes. The date", "self.variance1 = g1[1] # self.variance2 = g2[1] self.balance = balance", "def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min = np.min(self.train_X) x =", "self.balance p2 = 1.0 - self.balance params[0,0] = -1.0/(2.0*self.variance1) *", "return dim = y.shape[0] split1 = int(dim*train_per) if(dev_per ==0): train_y,test_y", "axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() # fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per):", "dataset with two classes, \"positive\" and \"negative\". 
# Each class", "== 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] >", "print_bayes_opt = True): import matplotlib.pyplot as plt fig = plt.figure()", "0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal()", "np.nonzero(self.train_y == 1) idx3,_ = np.nonzero(self.test_y == 0) idx4,_ =", "* np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] = 1.0/self.variance1 * self.mean1[0] params[2,0]", "fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min =", "# fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min", "y.shape[0] split1 = int(dim*train_per) if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1]) dev_y", "nr_examples - nr_positive # number of examples of \"negative\" class", "train_X = X[0:split1,:] dev_X = np.array([]) test_X = X[split1:,:] else:", "* self.mean2[1] print params return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt", "a Gaussian distribution. class SimpleDataSet(): ''' A simple two dimentional", "+ np.log(p2) params[1,0] = 1.0/self.variance1 * self.mean1[0] params[2,0] = 1.0/self.variance1", "examples of \"negative\" class self.mean1 = g1[0] # mean of", "+ np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0]", "self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y", "(%.2f,%.2f) Var2= %.2f \\nNr. 
Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1,", "-- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f", "class generates a 2D dataset with two classes, \"positive\" and", "= np.max(self.train_X) x_min = np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype = \"float\")", "u_i and std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]):", "= np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X = X[split1:split2,:] test_X =", "g1[1] # self.variance2 = g2[1] self.balance = balance self.nr_points =", "self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params", "= np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype = \"float\") y_star = ((params[1,1]-params[1,0])*x", "two classes, \"positive\" and \"negative\". # Each class follows a", "set contains points from two gaussians with mean u_i and", "self.mean1[1] params[1,1] = 1.0/self.variance2 * self.mean2[0] params[2,1] = 1.0/self.variance2 *", "purposes. The date set contains points from two gaussians with", "(%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \\nNr. 
Points=%.2f,", "idx4,_ = np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0):", "plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y == 0)", "plt fig = plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_ =", "= X[0:split1,:] dev_X = np.array([]) test_X = X[split1:,:] else: split2", "self.test_y = test_y def get_name(self): return \"Simple Data Set --", "1.0 - self.balance params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1)", "mean of negative class self.variance1 = g1[1] # self.variance2 =", "+ test_per > 1): print \"Train Dev Test split should", "params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() # fig.show() return fig,axis", "self.split = split self.X = X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X", "= X[0:split1,:] dev_X = X[split1:split2,:] test_X = X[split2:,:] return train_y,dev_y,test_y,train_X,dev_X,test_X", "= np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 =", "\"float\") y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour,", "self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() # fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour):", "((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend()", "of negative class self.variance1 = g1[1] # self.variance2 = g2[1]", "axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): 
axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal if(print_bayes_opt):", "* np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) +", "= np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 =", "train_X self.train_y = train_y self.dev_X = dev_X self.dev_y = dev_y", "= train_X self.train_y = train_y self.dev_X = dev_X self.dev_y =", "Test split should sum to one\" return dim = y.shape[0]", "add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min = np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype", "as np # This class generates a 2D dataset with", "= np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg]) y = np.vstack([y_pos, y_neg])", "fig = plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y", "gaussians with mean u_i and std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1],", "= g2[1] self.balance = balance self.nr_points = nr_examples X_pos_1 =", "__init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance #", "nr_examples*balance # number of examples of \"positive\" class nr_negative =", "= np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos =", "Var2= %.2f \\nNr. 
Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0],", "\"Bayes Optimal\",\"black\") axis.legend() # fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour): x_max", "perm = np.random.permutation(nr_examples) self.split = split self.X = X[perm,:] self.y", "class self.mean2 = g2[0] # mean of negative class self.variance1", "split self.X = X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2])", "= np.vstack([X_pos, X_neg]) y = np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples)", "1.0/self.variance2 * self.mean2[0] params[2,1] = 1.0/self.variance2 * self.mean2[1] print params", "np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] = 1.0/self.variance1 * self.mean1[0] params[2,0] =", "of \"negative\" class self.mean1 = g1[0] # mean of positive", "np.nonzero(self.test_y == 0) idx4,_ = np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s')", "Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance,", "= split self.X = X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X =", "points from two gaussians with mean u_i and std_i''' def", "number of examples of \"negative\" class self.mean1 = g1[0] #", "x_max = np.max(self.train_X) x_min = np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype =", "return \"Simple Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f", "X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg", "fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y == 0) idx2,_", 
"1.0/self.variance1 * self.mean1[1] params[1,1] = 1.0/self.variance2 * self.mean2[0] params[2,1] =", "This class generates a 2D dataset with two classes, \"positive\"", "simple two dimentional dataset for visualization purposes. The date set", "= %.2f Mean2= (%.2f,%.2f) Var2= %.2f \\nNr. Points=%.2f, Balance=%.2f Train-Dev-Test", "= self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() # fig.show() return fig,axis", "= np.array([]) train_X = X[0:split1,:] dev_X = np.array([]) test_X =", "class SimpleDataSet(): ''' A simple two dimentional dataset for visualization", "from two gaussians with mean u_i and std_i''' def __init__(self,nr_examples=100,g1", "axis.legend() # fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X)", "def get_name(self): return \"Simple Data Set -- Mean1= (%.2f,%.2f) Var1", "if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot", "np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int)", "np.random.permutation(nr_examples) self.split = split self.X = X[perm,:] self.y = y[perm]", "two dimentional dataset for visualization purposes. 
The date set contains", "of \"positive\" class nr_negative = nr_examples - nr_positive # number", "np # This class generates a 2D dataset with two", "Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \\nNr.", "0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal", "split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X = X[split1:split2,:]", "date set contains points from two gaussians with mean u_i", "# fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per +", "[[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance # number of", "self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params = np.zeros((3,2))", "idx2,_ = np.nonzero(self.train_y == 1) idx3,_ = np.nonzero(self.test_y == 0)", "np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg]) y =", "np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0]", "X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg", "= True): import matplotlib.pyplot as plt fig = plt.figure() fig.suptitle(self.get_name())", ",self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self):", "1) 
axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0):", "dev_X self.dev_y = dev_y self.test_X = test_X self.test_y = test_y", "+ (params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() #", "y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y = train_y", "dimentional dataset for visualization purposes. The date set contains points", "\\nNr. Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2,", "- params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() # fig.show() return", "self.X = X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X", "test_X self.test_y = test_y def get_name(self): return \"Simple Data Set", "= nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos =", "= np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples) self.split = split self.X", "= nr_examples*balance # number of examples of \"positive\" class nr_negative", "print \"Train Dev Test split should sum to one\" return", "Each class follows a Gaussian distribution. 
class SimpleDataSet(): ''' A", "params = np.zeros((3,2)) p1 = self.balance p2 = 1.0 -", "axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o') if(idx4.shape[0] > 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ##", "of positive class self.mean2 = g2[0] # mean of negative", "\"Simple Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2=", "self.test_X = test_X self.test_y = test_y def get_name(self): return \"Simple", "\"positive\" and \"negative\". # Each class follows a Gaussian distribution.", "X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos", "np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] =", "= 1.0/self.variance1 * self.mean1[0] params[2,0] = 1.0/self.variance1 * self.mean1[1] params[1,1]", "class follows a Gaussian distribution. class SimpleDataSet(): ''' A simple", "Optimal\",\"black\") axis.legend() # fig.show() return fig,axis def add_line(self,fig,axis,params,name,colour): x_max =", "follows a Gaussian distribution. class SimpleDataSet(): ''' A simple two", "class nr_negative = nr_examples - nr_positive # number of examples", "a 2D dataset with two classes, \"positive\" and \"negative\". 
#", "dev_per + test_per > 1): print \"Train Dev Test split", "= test_y def get_name(self): return \"Simple Data Set -- Mean1=", "Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f)", "= fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y == 0) idx2,_ = np.nonzero(self.train_y", "X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1", "axis.legend() # fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per", "X = np.vstack([X_pos, X_neg]) y = np.vstack([y_pos, y_neg]) perm =", "# Each class follows a Gaussian distribution. class SimpleDataSet(): '''", "np.array([]) train_X = X[0:split1,:] dev_X = np.array([]) test_X = X[split1:,:]", "* self.mean1[0] params[2,0] = 1.0/self.variance1 * self.mean1[1] params[1,1] = 1.0/self.variance2", "one\" return dim = y.shape[0] split1 = int(dim*train_per) if(dev_per ==0):", "np.vsplit(y,[split1]) dev_y = np.array([]) train_X = X[0:split1,:] dev_X = np.array([])", "== 1) idx3,_ = np.nonzero(self.test_y == 0) idx4,_ = np.nonzero(self.test_y", "self.mean1[0] params[2,0] = 1.0/self.variance1 * self.mean1[1] params[1,1] = 1.0/self.variance2 *", "## Plot Bayes optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes", "# self.variance2 = g2[1] self.balance = balance self.nr_points = nr_examples", "nr_negative = nr_examples - nr_positive # number of examples of", "np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2)", "for visualization purposes. 
The date set contains points from two", "contains points from two gaussians with mean u_i and std_i'''", "# number of examples of \"negative\" class self.mean1 = g1[0]", "numpy as np # This class generates a 2D dataset", "> 0): axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal if(print_bayes_opt): bayes_opt_params =", "= 1.0/self.variance2 * self.mean2[1] print params return params def plot_data(self,params=np.array([]),name=\"Naive", "%.2f Mean2= (%.2f,%.2f) Var2= %.2f \\nNr. Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0]", "y_neg]) perm = np.random.permutation(nr_examples) self.split = split self.X = X[perm,:]", "= np.nonzero(self.test_y == 0) idx4,_ = np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s')", "params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True): import matplotlib.pyplot as", "to one\" return dim = y.shape[0] split1 = int(dim*train_per) if(dev_per", "%.2f \\nNr. 
Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1],", "\"positive\" class nr_negative = nr_examples - nr_positive # number of", "np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2])", "# mean of positive class self.mean2 = g2[0] # mean", "(%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def", "params[2,1] = 1.0/self.variance2 * self.mean2[1] print params return params def", "fig,axis def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min = np.min(self.train_X) x", "X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2", "np.log(p2) params[1,0] = 1.0/self.variance1 * self.mean1[0] params[2,0] = 1.0/self.variance1 *", "0) idx4,_ = np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] >", "print params return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True):", "return fig,axis def add_line(self,fig,axis,params,name,colour): x_max = np.max(self.train_X) x_min = np.min(self.train_X)", "SimpleDataSet(): ''' A simple two dimentional dataset for visualization purposes.", "self.nr_points = nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos", "nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2])", 
"label=name, linewidth=2) axis.legend() # fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per", "np.zeros((3,2)) p1 = self.balance p2 = 1.0 - self.balance params[0,0]", "import numpy as np # This class generates a 2D", "generates a 2D dataset with two classes, \"positive\" and \"negative\".", "self.dev_X = dev_X self.dev_y = dev_y self.test_X = test_X self.test_y", "idx,_ = np.nonzero(self.train_y == 0) idx2,_ = np.nonzero(self.train_y == 1)", "split2 = int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X =", "return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True): import matplotlib.pyplot", "self.mean2[0] params[2,1] = 1.0/self.variance2 * self.mean2[1] print params return params", "else: split2 = int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X", "y_neg = np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos, X_neg]) y = np.vstack([y_pos,", "-params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() # fig.show() return fig,axis def", "self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params = np.zeros((3,2)) p1", "= g2[0] # mean of negative class self.variance1 = g1[1]", "params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] = 1.0/self.variance1", "idx3,_ = np.nonzero(self.test_y == 0) idx4,_ = np.nonzero(self.test_y == 1)", "np.arange(x_min,x_max,0.1,dtype = \"float\") y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0]", "np.nonzero(self.train_y == 0) idx2,_ = np.nonzero(self.train_y == 1) idx3,_ =", "= X[perm,:] self.y = y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X =", "# mean of negative class self.variance1 = g1[1] # 
self.variance2", "def get_bayes_optimal(self): params = np.zeros((3,2)) p1 = self.balance p2 =", "two gaussians with mean u_i and std_i''' def __init__(self,nr_examples=100,g1 =", "= 1.0/self.variance2 * self.mean2[0] params[2,1] = 1.0/self.variance2 * self.mean2[1] print", "y = np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples) self.split = split", "examples of \"positive\" class nr_negative = nr_examples - nr_positive #", "get_bayes_optimal(self): params = np.zeros((3,2)) p1 = self.balance p2 = 1.0", "== 0) idx4,_ = np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0]", "- nr_positive # number of examples of \"negative\" class self.mean1", "self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() # fig.show() return fig,axis def", "X[0:split1,:] dev_X = np.array([]) test_X = X[split1:,:] else: split2 =", "train_y,test_y = np.vsplit(y,[split1]) dev_y = np.array([]) train_X = X[0:split1,:] dev_X", "dev_X = np.array([]) test_X = X[split1:,:] else: split2 = int(dim*(train_per+dev_per))", "-1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2) params[1,0] = 1.0/self.variance1 * self.mean1[0]", "mean of positive class self.mean2 = g2[0] # mean of", "= g1[1] # self.variance2 = g2[1] self.balance = balance self.nr_points", "= int(dim*train_per) if(dev_per ==0): train_y,test_y = np.vsplit(y,[split1]) dev_y = np.array([])", "fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per + test_per > 1):", "2D dataset with two classes, \"positive\" and \"negative\". 
# Each", "positive class self.mean2 = g2[0] # mean of negative class", "self.balance params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] =", "= dev_y self.test_X = test_X self.test_y = test_y def get_name(self):", "''' A simple two dimentional dataset for visualization purposes. The", "= np.random.permutation(nr_examples) self.split = split self.X = X[perm,:] self.y =", "print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X =", "= y[perm] train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y =", "train_X = X[0:split1,:] dev_X = X[split1:split2,:] test_X = X[split2:,:] return", "# This class generates a 2D dataset with two classes,", "* self.mean1[1] params[1,1] = 1.0/self.variance2 * self.mean2[0] params[2,1] = 1.0/self.variance2", "= np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X =", "= \"float\") y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1])", "= -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] = -1.0/(2.0*self.variance2) *", "The date set contains points from two gaussians with mean", "= 1.0/self.variance1 * self.mean1[1] params[1,1] = 1.0/self.variance2 * self.mean2[0] params[2,1]", "balance self.nr_points = nr_examples X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1]) X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1])", "Gaussian distribution. 
class SimpleDataSet(): ''' A simple two dimentional dataset", "should sum to one\" return dim = y.shape[0] split1 =", "1) idx3,_ = np.nonzero(self.test_y == 0) idx4,_ = np.nonzero(self.test_y ==", "Bayes\", print_bayes_opt = True): import matplotlib.pyplot as plt fig =", "of examples of \"negative\" class self.mean1 = g1[0] # mean", "g1[0] # mean of positive class self.mean2 = g2[0] #", "p2 = 1.0 - self.balance params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1)", "x = np.arange(x_min,x_max,0.1,dtype = \"float\") y_star = ((params[1,1]-params[1,0])*x + (params[0,1]", "params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1] = -1.0/(2.0*self.variance2)", "optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params, \"Bayes Optimal\",\"black\") axis.legend() #", "np.max(self.train_X) x_min = np.min(self.train_X) x = np.arange(x_min,x_max,0.1,dtype = \"float\") y_star", "matplotlib.pyplot as plt fig = plt.figure() fig.suptitle(self.get_name()) axis = fig.add_subplot(1,1,1)", "axis = fig.add_subplot(1,1,1) idx,_ = np.nonzero(self.train_y == 0) idx2,_ =", "= np.random.normal(g2[0][1],g2[1],[nr_negative,1]) X_neg = np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg =", "p1 = self.balance p2 = 1.0 - self.balance params[0,0] =", "= np.arange(x_min,x_max,0.1,dtype = \"float\") y_star = ((params[1,1]-params[1,0])*x + (params[0,1] -", "==0): train_y,test_y = np.vsplit(y,[split1]) dev_y = np.array([]) train_X = X[0:split1,:]", "fig.show() return fig,axis def split_train_dev_test(X,y,train_per,dev_per,test_per): if(train_per + dev_per + test_per", "int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y = np.vsplit(y,(split1,split2)) train_X = X[0:split1,:] dev_X", "\"negative\" class self.mean1 = g1[0] # mean of positive class", "np.vstack([X_pos, X_neg]) y = np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples) 
self.split", "number of examples of \"positive\" class nr_negative = nr_examples -", "self.mean2[1] print params return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt =", "and std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive", "params[2,0] = 1.0/self.variance1 * self.mean1[1] params[1,1] = 1.0/self.variance2 * self.mean2[0]", "self.split[0],self.split[1],self.split[2]) def get_bayes_optimal(self): params = np.zeros((3,2)) p1 = self.balance p2", "= test_X self.test_y = test_y def get_name(self): return \"Simple Data", "= [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance # number", "\"Train Dev Test split should sum to one\" return dim", "nr_positive = nr_examples*balance # number of examples of \"positive\" class", "= np.nonzero(self.test_y == 1) axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c=\"red\",marker='s') axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c=\"blue\",marker='s') if(idx3.shape[0] > 0): axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c=\"red\",marker='o')", "- self.balance params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1) params[0,1]", "Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \\nNr. Points=%.2f, Balance=%.2f", "distribution. 
class SimpleDataSet(): ''' A simple two dimentional dataset for", "class self.mean1 = g1[0] # mean of positive class self.mean2", "self.mean2 = g2[0] # mean of negative class self.variance1 =", "of examples of \"positive\" class nr_negative = nr_examples - nr_positive", "params[1,1] = 1.0/self.variance2 * self.mean2[0] params[2,1] = 1.0/self.variance2 * self.mean2[1]", "= X[split1:,:] else: split2 = int(dim*(train_per+dev_per)) print split2 train_y,dev_y,test_y =", "self.train_y = train_y self.dev_X = dev_X self.dev_y = dev_y self.test_X", "np.random.normal(g1[0][1],g1[1],[nr_positive,1]) X_pos = np.hstack([X_pos_1,X_pos_2]) X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1]) X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1])", "nr_positive # number of examples of \"negative\" class self.mean1 =", "np.hstack([X_neg_1,X_neg_2]) y_pos = np.zeros([nr_positive,1],dtype=np.int) y_neg = np.ones([nr_negative,1],dtype=np.int) X = np.vstack([X_pos,", "Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)\"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points,", "\"negative\". # Each class follows a Gaussian distribution. 
class SimpleDataSet():", "= ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2)", "split_train_dev_test(self.X,self.y,split[0],split[1],split[2]) self.train_X = train_X self.train_y = train_y self.dev_X = dev_X", "= self.balance p2 = 1.0 - self.balance params[0,0] = -1.0/(2.0*self.variance1)", "g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance # number of examples", "= np.zeros((3,2)) p1 = self.balance p2 = 1.0 - self.balance", "def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive = nr_examples*balance", "self.dev_y = dev_y self.test_X = test_X self.test_y = test_y def", "(params[0,1] - params[0,0]))/(params[2,0] -params[2,1]) axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2) axis.legend() # fig.show()", "test_y def get_name(self): return \"Simple Data Set -- Mean1= (%.2f,%.2f)", "np.vstack([y_pos, y_neg]) perm = np.random.permutation(nr_examples) self.split = split self.X =", "axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c=\"blue\",marker='o') ## Plot Bayes optimal if(print_bayes_opt): bayes_opt_params = self.get_bayes_optimal() self.add_line(fig,axis,bayes_opt_params,", "std_i''' def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]): nr_positive =", "params return params def plot_data(self,params=np.array([]),name=\"Naive Bayes\", print_bayes_opt = True): import", "= g1[0] # mean of positive class self.mean2 = g2[0]" ]
[ "transposed = zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock, key=True) for", "repeating_key_xor, single_byte_xor # Lookup table for the number of 1", "this challenge than any of the # other ones. We", "on. Breaking # repeating-key XOR (\"Vigenère\") statistically is obviously an", "know the KEYSIZE: break the ciphertext into # blocks of", "single_byte_xor # Lookup table for the number of 1 bits", "split=False) ptext, key, high_score = b\"\", b\"\", 0 for size", "normalized_distance = 0 for i in range(0, len(bs) - size", "Here's how: # # 1. Let KEYSIZE be the guessed", "on the mike while the fly girls yell # In", "'{key.decode()}'\") print() print(ptext.decode()) if __name__ == \"__main__\": try: main() except", "and find the edit distance between them. # Normalize this", "bell # A rockin' on the mike while the fly", "utf-8 -*- # # Break repeating-key XOR # # It", "A rockin' on the mike while the fly girls yell", "them # together and you have the key. # #", "the first KEYSIZE worth of bytes, and the # second", "important. # # No, that's not a mistake. # #", "NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF] return distance def", "challenge isn't conceptually hard, but it involves actual # error-prone", "the Hamming distance between two bytestrings.\"\"\" distance = 0 for", "def main(): ctext = loader(\"6.txt\", \"base64\", split=False) ptext, key, high_score", "while the fly girls yell # In ecstasy in the", "second KEYSIZE worth of bytes, and find the edit distance", "2-3 KEYSIZE values. # Or take 4 KEYSIZE blocks instead", "a nibble. (Nybble, quartet, etc.) NIBBLE_BITS = [0, 1, 1,", "back and I'm ringin' the bell # A rockin' on", "error-prone coding. The other challenges in this set are there", "cuttin' all them Z's # Hittin' hard and the girlies", "b\"\", b\"\", 0 for size in likely_key_sizes(ctext): blocks = [ctext[i", "this # one, you're probably just fine up to Set", "number of differing # bits. 
The distance between: # #", "by KEYSIZE. # 4. The KEYSIZE with the smallest normalized", "from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import loader", "# Or take 4 KEYSIZE blocks instead of 2 and", "# # No, that's not a mistake. # # We", "# Normalize this result by dividing by KEYSIZE. # 4.", "block, and a block that is the second byte of", "two bytestrings.\"\"\" distance = 0 for b1, b2 in zip_longest(bs1,", ") candidate = repeating_key_xor(ctext, likely_key) score = englishness(candidate) if score", "b1 ^ b2 distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b", "We get more tech support questions for this challenge than", "first KEYSIZE worth of bytes, and the # second KEYSIZE", "# histogram is the repeating-key XOR key byte for that", "distance is probably the # key. You could proceed perhaps", "3, 1, 2, 2, 3, 2, 3, 3, 4] def", "proceed.* # 3. For each KEYSIZE, take the first KEYSIZE", "do this # one, you're probably just fine up to", "# you up to speed. This one is there to", "KEYSIZE be the guessed length of the key; try values", "# wokka wokka!!! # # is 37. *Make sure your", "each block, the single-byte XOR key that produces the best", "hard, but it involves actual # error-prone coding. The other", "from 2 to # (say) 40. # 2. Write a", "but it involves actual # error-prone coding. The other challenges", "byte for that block. Put them # together and you", "main() except KeyboardInterrupt: pass # Output: # # Key: 'Terminator", "+ 1): normalized_distance = 0 for i in range(0, len(bs)", "file here: # # http://cryptopals.com/static/challenge-data/6.txt # # It's been base64'd", "from util.text import englishness, repeating_key_xor, single_byte_xor # Lookup table for", "# -*- coding: utf-8 -*- # # Break repeating-key XOR", "a file here: # # http://cryptopals.com/static/challenge-data/6.txt # # It's been", "between: # # this is a test # # and", "is 37. 
# import inspect import os import sys from", "In ecstasy in the back of me # Well that's", "normalized_distance += hamming_distance(bs1, bs2) / 2 sizes.update({size: normalized_distance}) return sorted(sizes,", "was single-character XOR. You already have # code to do", "repeating-key-XOR'd ciphertext's most likely key sizes.\"\"\" sizes = {} for", "wokka!!! # # is 37. *Make sure your code agrees", "englishness(candidate) if score > high_score: ptext, key, high_score = candidate,", "surprisingly useful later on. Breaking # repeating-key XOR (\"Vigenère\") statistically", "3, 3, 4] def likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds a", "quartet, etc.) NIBBLE_BITS = [0, 1, 1, 2, 1, 2,", "5. Now that you probably know the KEYSIZE: break the", "a test # # and # # wokka wokka!!! #", "the # key. You could proceed perhaps with the smallest", "for i in range(0, len(bs) - size * 2, size", "X: Bring the noise' (29 bytes) # # I'm back", "2 to # (say) 40. # 2. Write a function", "in this text. # In particular: the \"wokka wokka!!!\" edit", "academic # exercise, a \"Crypto 101\" thing. But more people", "distance/Hamming distance between # two strings. The Hamming distance is", "distance between them. # Normalize this result by dividing by", "qualify you. If you can do this # one, you're", "edit distance between them. # Normalize this result by dividing", "have # code to do this. # 8. For each", "tech support questions for this challenge than any of the", "it was single-character XOR. You already have # code to", "blocks instead of 2 and average the distances. # 5.", "Well that's my DJ Deshay cuttin' all them Z's #", "you have the key. # # This code is going", "the key. # # This code is going to turn", "KEYSIZE worth of bytes, and find the edit distance between", "# important. # # No, that's not a mistake. #", "2, 1, 2, 2, 3, 1, 2, 2, 3, 2,", "something much more # important. 
# # No, that's not", "+ size], bs[i + size : i + size *", "my DJ Deshay cuttin' all them Z's # Hittin' hard", "second byte of every block, and so on. # 7.", "# code to do this. # 8. For each block,", "instead of 2 and average the distances. # 5. Now", "in range(0, len(bs) - size * 2, size * 2):", "each KEYSIZE, take the first KEYSIZE worth of bytes, and", "number of 1 bits in a nibble. (Nybble, quartet, etc.)", "for tblock in transposed ) candidate = repeating_key_xor(ctext, likely_key) score", "print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if __name__ == \"__main__\": try: main()", "with the smallest normalized edit distance is probably the #", "Put them # together and you have the key. #", "of 2 and average the distances. # 5. Now that", "Deshay cuttin' all them Z's # Hittin' hard and the", "distance def main(): ctext = loader(\"6.txt\", \"base64\", split=False) ptext, key,", "of every block, and so on. # 7. Solve each", "in range(0, len(ctext), size)] transposed = zip_longest(*blocks, fillvalue=0) likely_key =", "bs2 = bs[i : i + size], bs[i + size", "compute the edit distance/Hamming distance between # two strings. The", "os import sys from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from", "Solve each block as if it was single-character XOR. You", "python3 # -*- coding: utf-8 -*- # # Break repeating-key", "that you probably know the KEYSIZE: break the ciphertext into", "block, and so on. # 7. Solve each block as", "for this challenge than any of the # other ones.", "Hamming distance between two bytestrings.\"\"\" distance = 0 for b1,", "= englishness(candidate) if score > high_score: ptext, key, high_score =", "Vanilla's on the mike, man I'm not lazy. # #", "the ciphertext into # blocks of KEYSIZE length. # 6.", "Or take 4 KEYSIZE blocks instead of 2 and average", "bits in a nibble. (Nybble, quartet, etc.) 
NIBBLE_BITS = [0,", ": i + size * 2] normalized_distance += hamming_distance(bs1, bs2)", "in zip_longest(bs1, bs2, fillvalue=0): b = b1 ^ b2 distance", "2, 3, 2, 3, 3, 4] def likely_key_sizes(bs, lower=2, upper=40,", "key=True) for tblock in transposed ) candidate = repeating_key_xor(ctext, likely_key)", "# 2. Write a function to compute the edit distance/Hamming", "now. # # This challenge isn't conceptually hard, but it", "the distances. # 5. Now that you probably know the", "# second KEYSIZE worth of bytes, and find the edit", "*Make sure your code agrees before you proceed.* # 3.", "is officially on, now. # # This challenge isn't conceptually", "# It's been base64'd after being encrypted with repeating-key XOR.", "2] normalized_distance += hamming_distance(bs1, bs2) / 2 sizes.update({size: normalized_distance}) return", "questions for this challenge than any of the # other", "distance = 0 for b1, b2 in zip_longest(bs1, bs2, fillvalue=0):", "make a block that is the first byte of every", "# Break repeating-key XOR # # It is officially on,", "Break repeating-key XOR # # It is officially on, now.", "DJ Deshay cuttin' all them Z's # Hittin' hard and", "Decrypt it. # # Here's how: # # 1. Let", "englishness, repeating_key_xor, single_byte_xor # Lookup table for the number of", "much more # important. # # No, that's not a", "the noise' (29 bytes) # # I'm back and I'm", "# 4. The KEYSIZE with the smallest normalized edit distance", "bs2): \"\"\"Finds the Hamming distance between two bytestrings.\"\"\" distance =", "# bits. The distance between: # # this is a", "* 2] normalized_distance += hamming_distance(bs1, bs2) / 2 sizes.update({size: normalized_distance})", "average the distances. # 5. 
Now that you probably know", "0 for b1, b2 in zip_longest(bs1, bs2, fillvalue=0): b =", "ringin' the bell # A rockin' on the mike while", "here: # # http://cryptopals.com/static/challenge-data/6.txt # # It's been base64'd after", "transpose the blocks: make a block that is the first", "# # This code is going to turn out to", "except KeyboardInterrupt: pass # Output: # # Key: 'Terminator X:", "to bring # you up to speed. This one is", "XOR key that produces the best looking # histogram is", "key byte for that block. Put them # together and", "Output: # # Key: 'Terminator X: Bring the noise' (29", "# and # # wokka wokka!!! # # is 37.", "it, and a similar technique breaks something much more #", "Breaking # repeating-key XOR (\"Vigenère\") statistically is obviously an academic", "it than # can actually break it, and a similar", "be surprisingly useful later on. Breaking # repeating-key XOR (\"Vigenère\")", "hard and the girlies goin' crazy # Vanilla's on the", "set are there to bring # you up to speed.", "proceed perhaps with the smallest 2-3 KEYSIZE values. # Or", "if it was single-character XOR. You already have # code", "3, 4] def likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd", "# We get more tech support questions for this challenge", "\"know how\" to break it than # can actually break", "the bell # A rockin' on the mike while the", "high_score = candidate, likely_key, score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if", "crazy # Vanilla's on the mike, man I'm not lazy.", "a block that is the first byte of every #", "between two bytestrings.\"\"\" distance = 0 for b1, b2 in", "not a mistake. # # We get more tech support", "I'm back and I'm ringin' the bell # A rockin'", "noise' (29 bytes) # # I'm back and I'm ringin'", "wokka wokka!!! # # is 37. 
*Make sure your code", "try: main() except KeyboardInterrupt: pass # Output: # # Key:", "distance between: # # this is a test # #", ": i + size] for i in range(0, len(ctext), size)]", "# # Decrypt it. # # Here's how: # #", "people \"know how\" to break it than # can actually", "# exercise, a \"Crypto 101\" thing. But more people \"know", "2, size * 2): bs1, bs2 = bs[i : i", "been base64'd after being encrypted with repeating-key XOR. # #", "edit distance really is 37. # import inspect import os", "this set are there to bring # you up to", "bits. The distance between: # # this is a test", "girls yell # In ecstasy in the back of me", "import loader from util.text import englishness, repeating_key_xor, single_byte_xor # Lookup", "to compute the edit distance/Hamming distance between # two strings.", "the edit distance between them. # Normalize this result by", "distance between # two strings. The Hamming distance is just", "that produces the best looking # histogram is the repeating-key", "2, 2, 3, 2, 3, 3, 4] def likely_key_sizes(bs, lower=2,", "blocks: make a block that is the first byte of", "len(ctext), size)] transposed = zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock,", "challenge than any of the # other ones. We promise,", "# In ecstasy in the back of me # Well", "# 7. Solve each block as if it was single-character", "I'm ringin' the bell # A rockin' on the mike", "how\" to break it than # can actually break it,", "smallest 2-3 KEYSIZE values. # Or take 4 KEYSIZE blocks", "most likely key sizes.\"\"\" sizes = {} for size in", "sorted(sizes, key=lambda k: sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds the Hamming", "and # # wokka wokka!!! # # is 37. *Make", "sizes = {} for size in range(lower, upper + 1):", "# Well that's my DJ Deshay cuttin' all them Z's", "worth of bytes, and the # second KEYSIZE worth of", "KEYSIZE. # 4. The KEYSIZE with the smallest normalized edit", "probably just fine up to Set 6. 
# # There's", "particular: the \"wokka wokka!!!\" edit distance really is 37. #", "a similar technique breaks something much more # important. #", "Set 6. # # There's a file here: # #", "the mike while the fly girls yell # In ecstasy", "this is a test # # and # # wokka", "# # We get more tech support questions for this", "of the # other ones. We promise, there aren't any", "repeating_key_xor(ctext, likely_key) score = englishness(candidate) if score > high_score: ptext,", "I'm not lazy. # # <remainder of output omitted> #", "later on. Breaking # repeating-key XOR (\"Vigenère\") statistically is obviously", "there to bring # you up to speed. This one", "print() print(ptext.decode()) if __name__ == \"__main__\": try: main() except KeyboardInterrupt:", "to speed. This one is there to qualify you. If", "Now transpose the blocks: make a block that is the", "to qualify you. If you can do this # one,", "6. # # There's a file here: # # http://cryptopals.com/static/challenge-data/6.txt", "transposed ) candidate = repeating_key_xor(ctext, likely_key) score = englishness(candidate) if", ": i + size], bs[i + size : i +", "statistically is obviously an academic # exercise, a \"Crypto 101\"", "function to compute the edit distance/Hamming distance between # two", "of every # block, and a block that is the", "in the back of me # Well that's my DJ", "a \"Crypto 101\" thing. But more people \"know how\" to", "hamming_distance(bs1, bs2) / 2 sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda k:", "KEYSIZE values. # Or take 4 KEYSIZE blocks instead of", "probably the # key. You could proceed perhaps with the", "get more tech support questions for this challenge than any", "the best looking # histogram is the repeating-key XOR key", "# # It is officially on, now. # # This", "actual # error-prone coding. The other challenges in this set", "6. 
Now transpose the blocks: make a block that is", "+= NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF] return distance", "range(0, len(ctext), size)] transposed = zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join(", "that is the second byte of every block, and so", "util.loader import loader from util.text import englishness, repeating_key_xor, single_byte_xor #", "print(ptext.decode()) if __name__ == \"__main__\": try: main() except KeyboardInterrupt: pass", "there to qualify you. If you can do this #", "wokka!!!\" edit distance really is 37. # import inspect import", "so on. # 7. Solve each block as if it", "\"\"\"Finds a repeating-key-XOR'd ciphertext's most likely key sizes.\"\"\" sizes =", "normalized edit distance is probably the # key. You could", "this. # 8. For each block, the single-byte XOR key", "8. For each block, the single-byte XOR key that produces", "all them Z's # Hittin' hard and the girlies goin'", "inspect import os import sys from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda:", "could proceed perhaps with the smallest 2-3 KEYSIZE values. #", "for that block. Put them # together and you have", "an academic # exercise, a \"Crypto 101\" thing. But more", "Now that you probably know the KEYSIZE: break the ciphertext", "and average the distances. # 5. Now that you probably", "is the first byte of every # block, and a", "that's not a mistake. # # We get more tech", "more people \"know how\" to break it than # can", "in this set are there to bring # you up", "# # and # # wokka wokka!!! # # is", "For each KEYSIZE, take the first KEYSIZE worth of bytes,", "# I'm back and I'm ringin' the bell # A", "size], bs[i + size : i + size * 2]", "blatant errors in this text. # In particular: the \"wokka", "40. # 2. 
Write a function to compute the edit", "2, 3, 3, 4] def likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds", "(29 bytes) # # I'm back and I'm ringin' the", "your code agrees before you proceed.* # 3. For each", "the edit distance/Hamming distance between # two strings. The Hamming", "code to do this. # 8. For each block, the", "the \"wokka wokka!!!\" edit distance really is 37. # import", "more # important. # # No, that's not a mistake.", "\"base64\", split=False) ptext, key, high_score = b\"\", b\"\", 0 for", "# Here's how: # # 1. Let KEYSIZE be the", "to be surprisingly useful later on. Breaking # repeating-key XOR", "every # block, and a block that is the second", "in range(lower, upper + 1): normalized_distance = 0 for i", "size * 2): bs1, bs2 = bs[i : i +", "XOR (\"Vigenère\") statistically is obviously an academic # exercise, a", "(\"Vigenère\") statistically is obviously an academic # exercise, a \"Crypto", "i in range(0, len(ctext), size)] transposed = zip_longest(*blocks, fillvalue=0) likely_key", "out to be surprisingly useful later on. Breaking # repeating-key", "for i in range(0, len(ctext), size)] transposed = zip_longest(*blocks, fillvalue=0)", "0 for size in likely_key_sizes(ctext): blocks = [ctext[i : i", "other ones. We promise, there aren't any blatant errors in", "i + size] for i in range(0, len(ctext), size)] transposed", "you can do this # one, you're probably just fine", "It is officially on, now. # # This challenge isn't", "together and you have the key. # # This code", "mistake. # # We get more tech support questions for", "+ NIBBLE_BITS[b & 0xF] return distance def main(): ctext =", "# # I'm back and I'm ringin' the bell #", "the single-byte XOR key that produces the best looking #", "KEYSIZE: break the ciphertext into # blocks of KEYSIZE length.", "= repeating_key_xor(ctext, likely_key) score = englishness(candidate) if score > high_score:", "byte of every # block, and a block that is", "up to Set 6. # # There's a file here:", "2. 
Write a function to compute the edit distance/Hamming distance", "score > high_score: ptext, key, high_score = candidate, likely_key, score", "the mike, man I'm not lazy. # # <remainder of", "import sys from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader", "+ size] for i in range(0, len(ctext), size)] transposed =", "tblock in transposed ) candidate = repeating_key_xor(ctext, likely_key) score =", "me # Well that's my DJ Deshay cuttin' all them", "(say) 40. # 2. Write a function to compute the", "# 3. For each KEYSIZE, take the first KEYSIZE worth", "result by dividing by KEYSIZE. # 4. The KEYSIZE with", "every block, and so on. # 7. Solve each block", "likely_key_sizes(ctext): blocks = [ctext[i : i + size] for i", "goin' crazy # Vanilla's on the mike, man I'm not", "itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import loader from", "key, high_score = candidate, likely_key, score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode())", "etc.) NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2,", "lower=2, upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's most likely key", "a repeating-key-XOR'd ciphertext's most likely key sizes.\"\"\" sizes = {}", "== \"__main__\": try: main() except KeyboardInterrupt: pass # Output: #", "and so on. # 7. Solve each block as if", "errors in this text. # In particular: the \"wokka wokka!!!\"", "edit distance is probably the # key. You could proceed", "- size * 2, size * 2): bs1, bs2 =", "1): normalized_distance = 0 for i in range(0, len(bs) -", "between them. # Normalize this result by dividing by KEYSIZE.", "pass # Output: # # Key: 'Terminator X: Bring the", "key; try values from 2 to # (say) 40. #", "this text. # In particular: the \"wokka wokka!!!\" edit distance", "it. # # Here's how: # # 1. 
Let KEYSIZE", "b2 in zip_longest(bs1, bs2, fillvalue=0): b = b1 ^ b2", "size * 2] normalized_distance += hamming_distance(bs1, bs2) / 2 sizes.update({size:", "sys from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import", "2 sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda k: sizes[k])[:n] def hamming_distance(bs1,", "smallest normalized edit distance is probably the # key. You", "you proceed.* # 3. For each KEYSIZE, take the first", "of bytes, and the # second KEYSIZE worth of bytes,", "coding. The other challenges in this set are there to", "and I'm ringin' the bell # A rockin' on the", "challenges in this set are there to bring # you", "by dividing by KEYSIZE. # 4. The KEYSIZE with the", "# It is officially on, now. # # This challenge", "You already have # code to do this. # 8.", "loader(\"6.txt\", \"base64\", split=False) ptext, key, high_score = b\"\", b\"\", 0", "take 4 KEYSIZE blocks instead of 2 and average the", "= b\"\".join( single_byte_xor(tblock, key=True) for tblock in transposed ) candidate", "bs2) / 2 sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda k: sizes[k])[:n]", "key that produces the best looking # histogram is the", "the smallest 2-3 KEYSIZE values. # Or take 4 KEYSIZE", "sure your code agrees before you proceed.* # 3. For", "worth of bytes, and find the edit distance between them.", "single-character XOR. You already have # code to do this.", "\"Crypto 101\" thing. But more people \"know how\" to break", "1 bits in a nibble. (Nybble, quartet, etc.) NIBBLE_BITS =", "ptext, key, high_score = b\"\", b\"\", 0 for size in", "The KEYSIZE with the smallest normalized edit distance is probably", "try values from 2 to # (say) 40. # 2.", "values. # Or take 4 KEYSIZE blocks instead of 2", "It's been base64'd after being encrypted with repeating-key XOR. #", "nibble. (Nybble, quartet, etc.) 
NIBBLE_BITS = [0, 1, 1, 2,", "size * 2, size * 2): bs1, bs2 = bs[i", "if __name__ == \"__main__\": try: main() except KeyboardInterrupt: pass #", "+ size : i + size * 2] normalized_distance +=", "do this. # 8. For each block, the single-byte XOR", "(Nybble, quartet, etc.) NIBBLE_BITS = [0, 1, 1, 2, 1,", "encrypted with repeating-key XOR. # # Decrypt it. # #", "in transposed ) candidate = repeating_key_xor(ctext, likely_key) score = englishness(candidate)", "size : i + size * 2] normalized_distance += hamming_distance(bs1,", "you up to speed. This one is there to qualify", "to # (say) 40. # 2. Write a function to", "turn out to be surprisingly useful later on. Breaking #", "= [ctext[i : i + size] for i in range(0,", "* 2): bs1, bs2 = bs[i : i + size],", "likely key sizes.\"\"\" sizes = {} for size in range(lower,", "find the edit distance between them. # Normalize this result", "is there to qualify you. If you can do this", "# http://cryptopals.com/static/challenge-data/6.txt # # It's been base64'd after being encrypted", "base64'd after being encrypted with repeating-key XOR. # # Decrypt", "blocks of KEYSIZE length. # 6. Now transpose the blocks:", "4. The KEYSIZE with the smallest normalized edit distance is", "If you can do this # one, you're probably just", "is obviously an academic # exercise, a \"Crypto 101\" thing.", "the # other ones. We promise, there aren't any blatant", "# two strings. The Hamming distance is just the number", "# # Here's how: # # 1. Let KEYSIZE be", "are there to bring # you up to speed. This", "to do this. # 8. For each block, the single-byte", "bs1, bs2 = bs[i : i + size], bs[i +", "# error-prone coding. The other challenges in this set are", "test # # and # # wokka wokka!!! # #", "KEYSIZE length. # 6. Now transpose the blocks: make a", "already have # code to do this. # 8. 
For", "= candidate, likely_key, score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if __name__", "guessed length of the key; try values from 2 to", "XOR # # It is officially on, now. # #", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Break", "you're probably just fine up to Set 6. # #", "you. If you can do this # one, you're probably", "2): bs1, bs2 = bs[i : i + size], bs[i", "i + size], bs[i + size : i + size", "101\" thing. But more people \"know how\" to break it", "# 8. For each block, the single-byte XOR key that", "isn't conceptually hard, but it involves actual # error-prone coding.", "exercise, a \"Crypto 101\" thing. But more people \"know how\"", "import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import loader from util.text", "single-byte XOR key that produces the best looking # histogram", "more tech support questions for this challenge than any of", "produces the best looking # histogram is the repeating-key XOR", "sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import loader from util.text import englishness,", "key. # # This code is going to turn out", "you probably know the KEYSIZE: break the ciphertext into #", "# this is a test # # and # #", "def hamming_distance(bs1, bs2): \"\"\"Finds the Hamming distance between two bytestrings.\"\"\"", "# Lookup table for the number of 1 bits in", "fillvalue=0): b = b1 ^ b2 distance += NIBBLE_BITS[b >>", "promise, there aren't any blatant errors in this text. #", "b1, b2 in zip_longest(bs1, bs2, fillvalue=0): b = b1 ^", "going to turn out to be surprisingly useful later on.", "# This challenge isn't conceptually hard, but it involves actual", "man I'm not lazy. # # <remainder of output omitted>", "length of the key; try values from 2 to #", "# together and you have the key. 
# # This", "hamming_distance(bs1, bs2): \"\"\"Finds the Hamming distance between two bytestrings.\"\"\" distance", "repeating-key XOR key byte for that block. Put them #", "of 1 bits in a nibble. (Nybble, quartet, etc.) NIBBLE_BITS", "of bytes, and find the edit distance between them. #", "zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock, key=True) for tblock in", "37. *Make sure your code agrees before you proceed.* #", "KEYSIZE blocks instead of 2 and average the distances. #", "b\"\".join( single_byte_xor(tblock, key=True) for tblock in transposed ) candidate =", "37. # import inspect import os import sys from itertools", "1, 2, 2, 3, 1, 2, 2, 3, 2, 3,", "technique breaks something much more # important. # # No,", "obviously an academic # exercise, a \"Crypto 101\" thing. But", "that is the first byte of every # block, and", "and the girlies goin' crazy # Vanilla's on the mike,", "ciphertext into # blocks of KEYSIZE length. # 6. Now", "the fly girls yell # In ecstasy in the back", "http://cryptopals.com/static/challenge-data/6.txt # # It's been base64'd after being encrypted with", "[0, 1, 1, 2, 1, 2, 2, 3, 1, 2,", "4] def likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's", "before you proceed.* # 3. For each KEYSIZE, take the", "n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's most likely key sizes.\"\"\" sizes", "Z's # Hittin' hard and the girlies goin' crazy #", "that's my DJ Deshay cuttin' all them Z's # Hittin'", "* 2, size * 2): bs1, bs2 = bs[i :", "thing. But more people \"know how\" to break it than", "officially on, now. # # This challenge isn't conceptually hard,", "ptext, key, high_score = candidate, likely_key, score print(f\"Key: '{key.decode()}'\") print()", "two strings. 
The Hamming distance is just the number of", "code is going to turn out to be surprisingly useful", "loader from util.text import englishness, repeating_key_xor, single_byte_xor # Lookup table", "edit distance/Hamming distance between # two strings. The Hamming distance", "is 37. *Make sure your code agrees before you proceed.*", "Key: 'Terminator X: Bring the noise' (29 bytes) # #", "code agrees before you proceed.* # 3. For each KEYSIZE,", "& 0xF] return distance def main(): ctext = loader(\"6.txt\", \"base64\",", "likely_key, score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if __name__ == \"__main__\":", "is the second byte of every block, and so on.", "agrees before you proceed.* # 3. For each KEYSIZE, take", "a function to compute the edit distance/Hamming distance between #", "one is there to qualify you. If you can do", "is probably the # key. You could proceed perhaps with", "the repeating-key XOR key byte for that block. Put them", "to turn out to be surprisingly useful later on. Breaking", "2, 2, 3, 1, 2, 2, 3, 2, 3, 3,", "/ 2 sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda k: sizes[k])[:n] def", "# is 37. *Make sure your code agrees before you", "+= hamming_distance(bs1, bs2) / 2 sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda", "candidate, likely_key, score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if __name__ ==", "ones. We promise, there aren't any blatant errors in this", "sizes.\"\"\" sizes = {} for size in range(lower, upper +", "b = b1 ^ b2 distance += NIBBLE_BITS[b >> 4]", "of the key; try values from 2 to # (say)", "Hittin' hard and the girlies goin' crazy # Vanilla's on", "break it, and a similar technique breaks something much more", "values from 2 to # (say) 40. # 2. 
Write", "import os import sys from itertools import zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))", "1, 2, 1, 2, 2, 3, 1, 2, 2, 3,", "likely_key = b\"\".join( single_byte_xor(tblock, key=True) for tblock in transposed )", "# block, and a block that is the second byte", "bytestrings.\"\"\" distance = 0 for b1, b2 in zip_longest(bs1, bs2,", "in a nibble. (Nybble, quartet, etc.) NIBBLE_BITS = [0, 1,", "b\"\", 0 for size in likely_key_sizes(ctext): blocks = [ctext[i :", "looking # histogram is the repeating-key XOR key byte for", "fly girls yell # In ecstasy in the back of", "being encrypted with repeating-key XOR. # # Decrypt it. #", "likely_key) score = englishness(candidate) if score > high_score: ptext, key,", "can do this # one, you're probably just fine up", "= b1 ^ b2 distance += NIBBLE_BITS[b >> 4] +", "You could proceed perhaps with the smallest 2-3 KEYSIZE values.", "{} for size in range(lower, upper + 1): normalized_distance =", "For each block, the single-byte XOR key that produces the", "# # wokka wokka!!! # # is 37. *Make sure", "fine up to Set 6. # # There's a file", "is the repeating-key XOR key byte for that block. Put", "of me # Well that's my DJ Deshay cuttin' all", "for size in range(lower, upper + 1): normalized_distance = 0", "key. You could proceed perhaps with the smallest 2-3 KEYSIZE", "perhaps with the smallest 2-3 KEYSIZE values. # Or take", "them Z's # Hittin' hard and the girlies goin' crazy", "block, the single-byte XOR key that produces the best looking", "back of me # Well that's my DJ Deshay cuttin'", "# There's a file here: # # http://cryptopals.com/static/challenge-data/6.txt # #", "-*- coding: utf-8 -*- # # Break repeating-key XOR #", "just fine up to Set 6. 
# # There's a", "# one, you're probably just fine up to Set 6.", "key, high_score = b\"\", b\"\", 0 for size in likely_key_sizes(ctext):", "block that is the first byte of every # block,", "break it than # can actually break it, and a", "conceptually hard, but it involves actual # error-prone coding. The", "The other challenges in this set are there to bring", "# # is 37. *Make sure your code agrees before", "2 and average the distances. # 5. Now that you", "as if it was single-character XOR. You already have #", "XOR. # # Decrypt it. # # Here's how: #", "ecstasy in the back of me # Well that's my", "other challenges in this set are there to bring #", "distance really is 37. # import inspect import os import", "__name__ == \"__main__\": try: main() except KeyboardInterrupt: pass # Output:", "strings. The Hamming distance is just the number of differing", "import englishness, repeating_key_xor, single_byte_xor # Lookup table for the number", "up to speed. This one is there to qualify you.", "aren't any blatant errors in this text. # In particular:", "just the number of differing # bits. The distance between:", "KEYSIZE worth of bytes, and the # second KEYSIZE worth", "1, 1, 2, 1, 2, 2, 3, 1, 2, 2,", "them. # Normalize this result by dividing by KEYSIZE. #", "We promise, there aren't any blatant errors in this text.", "the KEYSIZE: break the ciphertext into # blocks of KEYSIZE", "Let KEYSIZE be the guessed length of the key; try", "# Key: 'Terminator X: Bring the noise' (29 bytes) #", "girlies goin' crazy # Vanilla's on the mike, man I'm", "repeating-key XOR # # It is officially on, now. #", "the smallest normalized edit distance is probably the # key.", "to break it than # can actually break it, and", "= [0, 1, 1, 2, 1, 2, 2, 3, 1,", "= b\"\", b\"\", 0 for size in likely_key_sizes(ctext): blocks =", "1. Let KEYSIZE be the guessed length of the key;", "there aren't any blatant errors in this text. 
# In", "= zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock, key=True) for tblock", "XOR key byte for that block. Put them # together", "the back of me # Well that's my DJ Deshay", "The distance between: # # this is a test #", "break the ciphertext into # blocks of KEYSIZE length. #", "2, 3, 1, 2, 2, 3, 2, 3, 3, 4]", "really is 37. # import inspect import os import sys", "# (say) 40. # 2. Write a function to compute", "# blocks of KEYSIZE length. # 6. Now transpose the", "0 for i in range(0, len(bs) - size * 2,", "block as if it was single-character XOR. You already have", "any blatant errors in this text. # In particular: the", "0xF] return distance def main(): ctext = loader(\"6.txt\", \"base64\", split=False)", "for b1, b2 in zip_longest(bs1, bs2, fillvalue=0): b = b1", "i in range(0, len(bs) - size * 2, size *", "that block. Put them # together and you have the", "# A rockin' on the mike while the fly girls", "4 KEYSIZE blocks instead of 2 and average the distances.", "7. Solve each block as if it was single-character XOR.", "bs[i + size : i + size * 2] normalized_distance", "with repeating-key XOR. # # Decrypt it. # # Here's", "any of the # other ones. We promise, there aren't", "# import inspect import os import sys from itertools import", "k: sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds the Hamming distance between", "best looking # histogram is the repeating-key XOR key byte", "byte of every block, and so on. # 7. Solve", "yell # In ecstasy in the back of me #", "on. # 7. Solve each block as if it was", "the number of 1 bits in a nibble. (Nybble, quartet,", "is just the number of differing # bits. 
The distance", "^ b2 distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b &", "KEYSIZE, take the first KEYSIZE worth of bytes, and the", "key sizes.\"\"\" sizes = {} for size in range(lower, upper", "repeating-key XOR (\"Vigenère\") statistically is obviously an academic # exercise,", "Write a function to compute the edit distance/Hamming distance between", "is going to turn out to be surprisingly useful later", "the guessed length of the key; try values from 2", "+ size * 2] normalized_distance += hamming_distance(bs1, bs2) / 2", "distance between two bytestrings.\"\"\" distance = 0 for b1, b2", "# # There's a file here: # # http://cryptopals.com/static/challenge-data/6.txt #", "and you have the key. # # This code is", "Lookup table for the number of 1 bits in a", "rockin' on the mike while the fly girls yell #", "table for the number of 1 bits in a nibble.", "of KEYSIZE length. # 6. Now transpose the blocks: make", "block that is the second byte of every block, and", "\"wokka wokka!!!\" edit distance really is 37. # import inspect", "mike while the fly girls yell # In ecstasy in", "score print(f\"Key: '{key.decode()}'\") print() print(ptext.decode()) if __name__ == \"__main__\": try:", "repeating-key XOR. # # Decrypt it. # # Here's how:", "be the guessed length of the key; try values from", "# 1. Let KEYSIZE be the guessed length of the", "Normalize this result by dividing by KEYSIZE. # 4. The", "len(bs) - size * 2, size * 2): bs1, bs2", "= loader(\"6.txt\", \"base64\", split=False) ptext, key, high_score = b\"\", b\"\",", "size)] transposed = zip_longest(*blocks, fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock, key=True)", "dividing by KEYSIZE. # 4. The KEYSIZE with the smallest", "one, you're probably just fine up to Set 6. #", "\"__main__\": try: main() except KeyboardInterrupt: pass # Output: # #", "length. # 6. Now transpose the blocks: make a block", "the first byte of every # block, and a block", "the second byte of every block, and so on. 
#", "a mistake. # # We get more tech support questions", "the key; try values from 2 to # (say) 40.", "ctext = loader(\"6.txt\", \"base64\", split=False) ptext, key, high_score = b\"\",", "the girlies goin' crazy # Vanilla's on the mike, man", "candidate = repeating_key_xor(ctext, likely_key) score = englishness(candidate) if score >", "text. # In particular: the \"wokka wokka!!!\" edit distance really", "fillvalue=0) likely_key = b\"\".join( single_byte_xor(tblock, key=True) for tblock in transposed", "for size in likely_key_sizes(ctext): blocks = [ctext[i : i +", "main(): ctext = loader(\"6.txt\", \"base64\", split=False) ptext, key, high_score =", "# Vanilla's on the mike, man I'm not lazy. #", "# # Key: 'Terminator X: Bring the noise' (29 bytes)", "on, now. # # This challenge isn't conceptually hard, but", "distance is just the number of differing # bits. The", "XOR. You already have # code to do this. #", "0))))) from util.loader import loader from util.text import englishness, repeating_key_xor,", "blocks = [ctext[i : i + size] for i in", "There's a file here: # # http://cryptopals.com/static/challenge-data/6.txt # # It's", "and the # second KEYSIZE worth of bytes, and find", "# # This challenge isn't conceptually hard, but it involves", "In particular: the \"wokka wokka!!!\" edit distance really is 37.", "# 5. Now that you probably know the KEYSIZE: break", "actually break it, and a similar technique breaks something much", "speed. This one is there to qualify you. If you", "# No, that's not a mistake. # # We get", "zip_longest(bs1, bs2, fillvalue=0): b = b1 ^ b2 distance +=", "for the number of 1 bits in a nibble. 
(Nybble,", "high_score: ptext, key, high_score = candidate, likely_key, score print(f\"Key: '{key.decode()}'\")", "# This code is going to turn out to be", "high_score = b\"\", b\"\", 0 for size in likely_key_sizes(ctext): blocks", "support questions for this challenge than any of the #", "KeyboardInterrupt: pass # Output: # # Key: 'Terminator X: Bring", "bs2, fillvalue=0): b = b1 ^ b2 distance += NIBBLE_BITS[b", "differing # bits. The distance between: # # this is", "useful later on. Breaking # repeating-key XOR (\"Vigenère\") statistically is", "normalized_distance}) return sorted(sizes, key=lambda k: sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds", "score = englishness(candidate) if score > high_score: ptext, key, high_score", "of differing # bits. The distance between: # # this", "upper + 1): normalized_distance = 0 for i in range(0,", "upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's most likely key sizes.\"\"\"", "and a block that is the second byte of every", "3, 2, 3, 3, 4] def likely_key_sizes(bs, lower=2, upper=40, n=3):", "into # blocks of KEYSIZE length. # 6. Now transpose", "# # 1. Let KEYSIZE be the guessed length of", "single_byte_xor(tblock, key=True) for tblock in transposed ) candidate = repeating_key_xor(ctext,", "'Terminator X: Bring the noise' (29 bytes) # # I'm", "than # can actually break it, and a similar technique", "it involves actual # error-prone coding. The other challenges in", "ciphertext's most likely key sizes.\"\"\" sizes = {} for size", "# other ones. We promise, there aren't any blatant errors", "This code is going to turn out to be surprisingly", "this result by dividing by KEYSIZE. # 4. The KEYSIZE", "similar technique breaks something much more # important. 
# #", "bs[i : i + size], bs[i + size : i", "the # second KEYSIZE worth of bytes, and find the", "bytes, and the # second KEYSIZE worth of bytes, and", "# repeating-key XOR (\"Vigenère\") statistically is obviously an academic #", "= bs[i : i + size], bs[i + size :", "return sorted(sizes, key=lambda k: sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds the", "bytes) # # I'm back and I'm ringin' the bell", "= {} for size in range(lower, upper + 1): normalized_distance", "take the first KEYSIZE worth of bytes, and the #", "> high_score: ptext, key, high_score = candidate, likely_key, score print(f\"Key:", "distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF] return", "return distance def main(): ctext = loader(\"6.txt\", \"base64\", split=False) ptext,", "can actually break it, and a similar technique breaks something", "involves actual # error-prone coding. The other challenges in this", ">> 4] + NIBBLE_BITS[b & 0xF] return distance def main():", "This challenge isn't conceptually hard, but it involves actual #", "bytes, and find the edit distance between them. # Normalize", "b2 distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF]", "3. For each KEYSIZE, take the first KEYSIZE worth of", "# 6. Now transpose the blocks: make a block that", "# can actually break it, and a similar technique breaks", "is a test # # and # # wokka wokka!!!", "1, 2, 2, 3, 2, 3, 3, 4] def likely_key_sizes(bs,", "coding: utf-8 -*- # # Break repeating-key XOR # #", "key=lambda k: sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds the Hamming distance", "# Hittin' hard and the girlies goin' crazy # Vanilla's", "bring # you up to speed. This one is there", "The Hamming distance is just the number of differing #", "a block that is the second byte of every block,", "# # this is a test # # and #", "range(lower, upper + 1): normalized_distance = 0 for i in", "size in range(lower, upper + 1): normalized_distance = 0 for", "after being encrypted with repeating-key XOR. 
# # Decrypt it.", "with the smallest 2-3 KEYSIZE values. # Or take 4", "probably know the KEYSIZE: break the ciphertext into # blocks", "This one is there to qualify you. If you can", "# # Break repeating-key XOR # # It is officially", "import inspect import os import sys from itertools import zip_longest", "No, that's not a mistake. # # We get more", "than any of the # other ones. We promise, there", "have the key. # # This code is going to", "in likely_key_sizes(ctext): blocks = [ctext[i : i + size] for", "range(0, len(bs) - size * 2, size * 2): bs1,", "But more people \"know how\" to break it than #", "mike, man I'm not lazy. # # <remainder of output", "likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's most likely", "KEYSIZE with the smallest normalized edit distance is probably the", "block. Put them # together and you have the key.", "breaks something much more # important. # # No, that's", "# # It's been base64'd after being encrypted with repeating-key", "# # http://cryptopals.com/static/challenge-data/6.txt # # It's been base64'd after being", "how: # # 1. Let KEYSIZE be the guessed length", "4] + NIBBLE_BITS[b & 0xF] return distance def main(): ctext", "= 0 for i in range(0, len(bs) - size *", "between # two strings. The Hamming distance is just the", "# key. You could proceed perhaps with the smallest 2-3", "\"\"\"Finds the Hamming distance between two bytestrings.\"\"\" distance = 0", "-*- # # Break repeating-key XOR # # It is", "on the mike, man I'm not lazy. # # <remainder", "distances. # 5. 
Now that you probably know the KEYSIZE:", "size] for i in range(0, len(ctext), size)] transposed = zip_longest(*blocks,", "the blocks: make a block that is the first byte", "from util.loader import loader from util.text import englishness, repeating_key_xor, single_byte_xor", "def likely_key_sizes(bs, lower=2, upper=40, n=3): \"\"\"Finds a repeating-key-XOR'd ciphertext's most", "size in likely_key_sizes(ctext): blocks = [ctext[i : i + size]", "i + size * 2] normalized_distance += hamming_distance(bs1, bs2) /", "util.text import englishness, repeating_key_xor, single_byte_xor # Lookup table for the", "sizes.update({size: normalized_distance}) return sorted(sizes, key=lambda k: sizes[k])[:n] def hamming_distance(bs1, bs2):", "zip_longest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0))))) from util.loader import loader from util.text import", "the number of differing # bits. The distance between: #", "NIBBLE_BITS[b & 0xF] return distance def main(): ctext = loader(\"6.txt\",", "if score > high_score: ptext, key, high_score = candidate, likely_key,", "to Set 6. # # There's a file here: #", "each block as if it was single-character XOR. You already", "first byte of every # block, and a block that", "= 0 for b1, b2 in zip_longest(bs1, bs2, fillvalue=0): b", "Hamming distance is just the number of differing # bits.", "and a similar technique breaks something much more # important.", "sizes[k])[:n] def hamming_distance(bs1, bs2): \"\"\"Finds the Hamming distance between two", "Bring the noise' (29 bytes) # # I'm back and", "histogram is the repeating-key XOR key byte for that block.", "# Decrypt it. # # Here's how: # # 1.", "[ctext[i : i + size] for i in range(0, len(ctext),", "# In particular: the \"wokka wokka!!!\" edit distance really is", "NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2, 3,", "# Output: # # Key: 'Terminator X: Bring the noise'" ]
[ "code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` - code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size,", "self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1))", "def decode(self, tgt_words, tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None): if self.split_decoder:", "- summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size, max_len)`` Output: - ``(batch_size,", "code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): batch_size =", "attentions = [] dec_log_probs = [] acc_dec_outs = [] max_mem_len", "if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns", "max_mem_len) attns = {\"coverage\": None} enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn", "here if needed if kwargs['code_mask_rep'] is not None: mask =", "layer_wise_outputs if self.layer_wise_attn else memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len,", "nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len,", "self._copy: mask = tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt,", "memory_bank, state, step=None, layer_wise_coverage=None): if self.split_decoder: copier_out, attns = self.transformer_c(tgt_words,", "max_word_len)`` - summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size, max_len)`` Output: -", "\"l\" table.align[\"Output Shape\"] = \"r\" table.align[\"Param #\"] = \"r\" for", "tgt = self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out,", "f from prettytable import PrettyTable from c2nl.modules.char_embedding import CharEmbedding from", "else: word_rep = 
torch.cat((word_rep, char_rep), 2) # B x P", "in range(prediction.size(0)): if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b])", "if attentions else None return { 'predictions': dec_preds, 'copy_info': copy_info,", "embedding options should be True assert args.use_src_word or args.use_src_char assert", "x d+f if self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep = word_rep", "self.no_relative_pos = all(v == 0 for v in args.max_relative_pos) if", "summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if self.layer_wise_attn else", "return memory_bank, layer_outputs class Decoder(nn.Module): def __init__(self, args, input_size): super(Decoder,", "= self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len)", "args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert len(args.filter_size) ==", "self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters)", "= args.use_tgt_word if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size", "max_word_len)`` - code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep:", "== len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size +=", "self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len +", "memory_bank, state) class Transformer(nn.Module): \"\"\"Module that writes an answer for", "= self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss =", 
"f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)],", "c2nl.inputters import constants from c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator import", "self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3)", "layer_outputs, _ = self.transformer(input, input_len) # B x seq_len x", "return word_rep class Encoder(nn.Module): def __init__(self, args, input_size): super(Encoder, self).__init__()", "self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self): table = PrettyTable()", ") self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters()", "tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None): if self.split_decoder: copier_out, attns =", "self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def init_decoder(self,", "encode the source sequence code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder')", "_run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map,", "Output: - ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if self.training: return", "word_rep is None: word_rep = char_rep else: word_rep = torch.cat((word_rep,", "= nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep,", "= self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2, 3),", 
"nn.Linear(input_size, 1, bias=False) def count_parameters(self): return self.transformer.count_parameters() def forward(self, input,", "code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs)", "self.src_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep elif mode == 'decoder':", "= ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len", "P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep,", "torch.stack(attentions, dim=1) if attentions else None return { 'predictions': dec_preds,", "input_len) # B x seq_len x h if self.use_all_enc_layers: output", "\\ if isinstance(memory_bank, list) else memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len)", "d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers =", "'Transformer' if len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos) == 1 args.max_relative_pos", "= Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize if self.use_tgt_word: self.tgt_word_embeddings", "**kwargs): \"\"\" Input: - code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size,", "pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc", "- code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` - code_len: ``(batch_size)`` - summ_word_rep:", "copy_score: batch_size, tgt_len, src_len _, copy_score, _ = self.copy_attn(decoder_outputs, memory_bank,", "B x P x d+f word_rep = self.tgt_highway_net(word_rep) # B", "src_len _, copy_score, _ = self.copy_attn(decoder_outputs, memory_bank, 
memory_lengths=code_len, softmax_weights=False) #", "self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1, bias=False) def count_parameters(self): return self.transformer.count_parameters()", "args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2)", "params['memory_bank'] # +1 for <EOS> token for idx in range(params['max_len']", "ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(), target) else: scores = self.generator(decoder_outputs)", "if self.split_decoder: copier_out, attns = self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step,", "self.encoder(code_rep, code_len) # B x seq_len x h # embed", "else: memory_bank = layer_outputs[-1] return memory_bank, layer_outputs class Decoder(nn.Module): def", "``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` - summ_len: ``(batch_size)``", "== self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if self._copy:", "weights here if needed if kwargs['code_mask_rep'] is not None: mask", "in attns: # std_attn: batch_size x num_heads x 1 x", "mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len", "mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if self.layer_wise_attn", "``(batch_size, max_que_len, max_word_len)`` - summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size, max_len)``", "= torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x", "prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction =", "d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = 
args.use_all_enc_layers if", "= widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self, params,", "= word_rep + pos_rep else: raise ValueError('Unknown embedder mode!') word_rep", "# Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1)", "d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state,", "ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token']", "self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns =", "not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask,", "'copy_info': copy_info, 'memory_bank': memory_bank, 'attentions': attentions } def count_parameters(self): return", "``(batch_size, max_doc_len, max_word_len)`` - code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)``", "0 self.dec_input_size = 0 # at least one of word", "for v in args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings =", "CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net =", "if len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos) == 1 args.max_relative_pos =", "torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda()", "return self.decoder.count_parameters() def layer_wise_parameters(self): table = PrettyTable() table.field_names = [\"Layer", "layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] 
acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score, _", "f if word_rep is None: word_rep = char_rep else: word_rep", "layer_wise_dec_out[-1] loss = dict() target = tgt_seq[:, 1:].contiguous() if self._copy:", "use_cuda: blank_b = blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0,", ") # To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf`", "one of word or char embedding options should be True", "== len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size +=", "2, self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char, sequence_type=None,", "_ = self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False) # mask copy_attn weights", "elif choice == 'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert", "step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step)", "self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos = all(v ==", "memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x", "self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else:", "if args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state, map_location=lambda storage, loc: storage", "self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d if self.use_tgt_char: char_rep", "forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1] \\ if", "+ 2, self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char,", "class 
Encoder(nn.Module): def __init__(self, args, input_size): super(Encoder, self).__init__() self.transformer =", "h params = dict() params['memory_bank'] = memory_bank params['layer_wise_outputs'] = layer_wise_outputs", "c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters import", "< len(tgt_dict): words.append(tgt_dict[widx]) else: widx = widx - len(tgt_dict) words.append(src_vocabs[idx][widx])", "1).float()).mean() return loss def forward(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep,", "alignment, **kwargs) else: return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment,", "from c2nl.utils.misc import sequence_mask class Embedder(nn.Module): def __init__(self, args): super(Embedder,", "* 2, self.input_size), nn.Sigmoid() ) self.fusion_gate = nn.Sequential( nn.Linear(self.input_size *", "x seq_len x h params = dict() params['memory_bank'] = memory_bank", "self.encoder(word_rep, code_len) # B x seq_len x h params =", "return self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len): if self.split_decoder: state_c =", "summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss = dict() target =", "= torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1] return memory_bank,", "= args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos = all(v == 0", "len(tgt_dict): words.append(tgt_dict[widx]) else: widx = widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return", "if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1, bias=False) def count_parameters(self): return", "B x P x f if word_rep is None: word_rep", "mode='encoder', step=None): if mode == 'encoder': word_rep = None if", "if widx < len(tgt_dict): words.append(tgt_dict[widx]) else: widx = widx -", "= layer_wise_outputs if 
self.layer_wise_attn else memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs,", "dim=1) if choice == 'greedy': tgt_prob, tgt = torch.max(prediction, dim=1,", "self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters)))", "kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict'] = kwargs['src_dict']", "= self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs =", "tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1] \\ if isinstance(memory_bank, list) else", "= 'Transformer' if len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos) == 1", "widx = w[0].item() if widx < len(tgt_dict): words.append(tgt_dict[widx]) else: widx", "P_LEN)`` \"\"\" if self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep,", "d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) # To accomplish eq. 
19", "source sequence code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs", "``(batch_size, P_LEN)`` \"\"\" if self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len,", "return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map,", "= word_rep + pos_rep elif mode == 'decoder': word_rep =", "d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head,", "nn.ReLU() ) else: self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k,", "std_attn: batch_size x num_heads x 1 x src_len std_attn =", "__init__(self, args, input_size): super(Encoder, self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head,", "ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] =", "self.fusion_gate(gate_input) else: decoder_outputs, attns = self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step,", "* args.nlayers self.embedder = Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder", "d if self.use_src_char: char_rep = self.src_char_embeddings(sequence_char) # B x P", "P x d if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) # B", "embedder mode!') word_rep = self.dropout(word_rep) return word_rep class Encoder(nn.Module): def", "= torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step]) # used in", "= \"l\" table.align[\"Output Shape\"] = \"r\" table.align[\"Param #\"] = \"r\"", "copy_info = torch.stack(copy_info, dim=1) if copy_info else None # attentions:", "= 
Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings", "self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda:", "input_size self.split_decoder = args.split_decoder and args.copy_attn if self.split_decoder: # Following", "# +1 for <EOS> token for idx in range(params['max_len'] +", "= kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] =", "x P x d if self.use_src_char: char_rep = self.src_char_embeddings(sequence_char) #", "if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict,", "# copy_score: batch_size, tgt_len, src_len _, copy_score, _ = self.copy_attn(decoder_outputs,", "= layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score, _ = self.copy_attn(decoder_outputs,", "= dict() params['memory_bank'] = memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] =", "nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid() ) self.fusion_gate = nn.Sequential(", "self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type)", "for w in words] words = torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1)", "max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d else: return", "params['src_mask'].byte().unsqueeze(1) # Make it broadcastable. 
copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score,", "src_map, alignment, **kwargs) else: return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map,", "c2nl.utils.misc import sequence_mask class Embedder(nn.Module): def __init__(self, args): super(Embedder, self).__init__()", "== 'decoder': word_rep = None if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2))", "params['fill'] = kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict']", "= code_word_rep dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy') dec_preds", "memory_bank, layer_outputs class Decoder(nn.Module): def __init__(self, args, input_size): super(Decoder, self).__init__()", "# To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid", "tgt_seq: ``(batch_size, max_len)`` Output: - ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\"", "torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs, attns", "for idx, w in enumerate(t): widx = w[0].item() if widx", "max_mem_len = params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states", "src_vocabs): words = [] for idx, w in enumerate(t): widx", "else: pos_enc = torch.LongTensor([step]) # used in inference time pos_enc", "[] attentions = [] dec_log_probs = [] acc_dec_outs = []", "CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask class Embedder(nn.Module): def __init__(self,", "params['src_words'] = code_word_rep dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy')", "Embedder(nn.Module): def __init__(self, args): super(Embedder, self).__init__() self.enc_input_size = 0 self.dec_input_size", "layer_wise_parameters(self): table = PrettyTable() 
table.field_names = [\"Layer Name\", \"Output Shape\",", "word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep = word_rep", "if self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c =", "Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize self.use_src_char = args.use_src_char self.use_tgt_char", "= TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop", "attentions else None return { 'predictions': dec_preds, 'copy_info': copy_info, 'memory_bank':", "torch.LongTensor([step]) # used in inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if", "code_len.size(0) # embed and encode the source sequence code_rep =", "input_len): layer_outputs, _ = self.transformer(input, input_len) # B x seq_len", "in inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc =", "summ_len, tgt_seq, src_map, alignment, **kwargs): batch_size = code_len.size(0) # embed", "Embeddings from c2nl.modules.highway import Highway from c2nl.encoders.transformer import TransformerEncoder from", "assert args.use_tgt_word or args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word = args.use_tgt_word", "args.copy_attn if self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c", "= self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float())", "**kwargs) def __tens2sent(self, t, tgt_dict, src_vocabs): words = [] for", "blank_b = blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b))", "B x seq_len x nlayers x h layer_scores = 
self.layer_weights(output).squeeze(3)", "if step is None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc", "it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) scores =", "nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos =", "if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) # B x P x", "p in self.parameters() if p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters() def", "import torch.nn as nn import torch.nn.functional as f from prettytable", "heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers", "summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): batch_size = code_len.size(0) #", "self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb) def", "self.dec_input_size += args.emsize self.use_src_char = args.use_src_char self.use_tgt_char = args.use_tgt_char if", "# B x seq_len x nlayers x h layer_scores =", "torch.mul(f_t, dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs, attns =", "= [] max_mem_len = params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list) else", "tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1 tgt_chars =", "question given a passage.\"\"\" def __init__(self, args, tgt_dict): \"\"\"\"Constructor of", "= self.fusion_gate(gate_input) else: decoder_outputs, attns = self.transformer(tgt_words, tgt_emb, memory_bank, state,", "self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, 
d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn,", "scores = scores[:, :-1, :].contiguous() # `batch x tgt_len -", "attn_copy, params['src_map']) prediction = prediction.squeeze(1) for b in range(prediction.size(0)): if", "'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone())", "step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t,", "self.embedder = Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args,", "2) # B x P x d+f word_rep = self.src_highway_net(word_rep)", "- 1).float()).mean() return loss def forward(self, code_word_rep, code_char_rep, code_type_rep, code_len,", "self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator =", "choice == 'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert False", "args.emsize self.use_src_char = args.use_src_char self.use_tgt_char = args.use_tgt_char if self.use_src_char: assert", "num_layers=2) if self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters,", "copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn", "= Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size)", "self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) #", "w[0].item() if widx < len(tgt_dict): words.append(tgt_dict[widx]) else: widx = widx", "'attentions': attentions } def count_parameters(self): return sum(p.numel() for p in", "# B x seq_len x h if 
self.use_all_enc_layers: output =", "= nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char, sequence_type=None, mode='encoder', step=None): if", "# B x seq_len x h # embed and encode", "memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs =", "[] for idx, w in enumerate(t): widx = w[0].item() if", "tgt_len x vocab_size` scores = scores[:, :-1, :].contiguous() # `batch", "mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy", "TransformerDecoder from c2nl.inputters import constants from c2nl.modules.global_attention import GlobalAttention from", "words def __generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda", "tgt_words = words.unsqueeze(1) return dec_preds, attentions, copy_info, dec_log_probs def decode(self,", "args.reload_decoder_state, map_location=lambda storage, loc: storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self): if", "sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert len(args.filter_size)", "if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep =", "self.use_type = args.use_code_type if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb", "d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size,", "ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean() return loss def forward(self,", "B x seq_len x h if self.use_all_enc_layers: output = torch.stack(layer_outputs,", "= 0 self.dec_input_size = 0 # at least one of", "# B x 1 tgt_chars = None if 
self.embedder.use_tgt_char: tgt_chars", "= fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else:", "layer_outputs[-1] return memory_bank, layer_outputs class Decoder(nn.Module): def __init__(self, args, input_size):", "state = self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class", "it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) prediction =", "GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict),", "self.input_size), nn.ReLU() ) else: self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head,", "x h params = dict() params['memory_bank'] = memory_bank params['layer_wise_outputs'] =", "copy_info, 'memory_bank': memory_bank, 'attentions': attentions } def count_parameters(self): return sum(p.numel()", "alignment, **kwargs): \"\"\" Input: - code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep:", "CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep,", "from c2nl.inputters import constants from c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator", "B x P x d if self.use_src_char: char_rep = self.src_char_embeddings(sequence_char)", "if self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize,", "= TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, 
max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist)", "self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2) # B x seq_len x", "count_parameters(self): return self.transformer.count_parameters() def forward(self, input, input_len): layer_outputs, _ =", "None return { 'predictions': dec_preds, 'copy_info': copy_info, 'memory_bank': memory_bank, 'attentions':", "end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step]) # used in inference time", "# at least one of word or char embedding options", "h if self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2) # B x", "self.layer_wise_attn else memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb)", "heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) self.transformer_d = TransformerDecoder(", "return state_c, state_d else: return self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words,", "use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1,", "words = [params['tgt_dict'][w] for w in words] words = torch.Tensor(words).type_as(tgt)", "= ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean()", "self.use_tgt_char = args.use_tgt_char if self.use_src_char: assert len(args.filter_size) == len(args.nfilters) self.src_char_embeddings", "torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return dec_preds, attentions, copy_info, dec_log_probs def", "torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x h", "else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction, dim=1) if choice", "params['src_map']) prediction = prediction.squeeze(1) for b in range(prediction.size(0)): if 
params['blank'][b]:", "memory_bank, memory_lengths=code_len, softmax_weights=False) # mask copy_attn weights here if needed", "code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map'] = src_map params['src_mask'] = kwargs['code_mask_rep']", "_ = self.transformer(input, input_len) # B x seq_len x h", "enumerate(t): widx = w[0].item() if widx < len(tgt_dict): words.append(tgt_dict[widx]) else:", "self).__init__() self.enc_input_size = 0 self.dec_input_size = 0 # at least", "mode == 'decoder': word_rep = None if self.use_tgt_word: word_rep =", "= args.use_tgt_char if self.use_src_char: assert len(args.filter_size) == len(args.nfilters) self.src_char_embeddings =", "= [\"Layer Name\", \"Output Shape\", \"Param #\"] table.align[\"Layer Name\"] =", "params['tgt_dict'], params['source_vocab']) tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist()", "return decoder_outputs, attns def forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len", "in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for w", "def count_parameters(self): if self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return", "= torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for w in words] words", "blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction =", "mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable. 
copy_score.data.masked_fill_(mask, -float('inf')) attn_copy", "for name, parameters in self.named_parameters(): if parameters.requires_grad: table.add_row([name, str(list(parameters.shape)), parameters.numel()])", "from c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters", "-float('inf')) attn_copy = f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map'])", "in args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size)", "PrettyTable() table.field_names = [\"Layer Name\", \"Output Shape\", \"Param #\"] table.align[\"Layer", "pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep elif", "for p in self.parameters() if p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters()", "args.use_code_type if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = args.src_pos_emb", "= params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if tgt_words is None: tgt_words", "log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\"", "if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words] tgt_chars", "copy_attn weights here if needed if params['src_mask'] is not None:", "self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop,", "= pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep", "self.layer_weights = nn.Linear(input_size, 1, bias=False) def count_parameters(self): return 
self.transformer.count_parameters() def", "blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b,", "dec_preds, 'copy_info': copy_info, 'memory_bank': memory_bank, 'attentions': attentions } def count_parameters(self):", "`https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid() )", "memory_bank[0].shape[1] \\ if isinstance(memory_bank, list) else memory_bank.shape[1] state = self.init_decoder(memory_len,", "memory_bank = layer_outputs[-1] return memory_bank, layer_outputs class Decoder(nn.Module): def __init__(self,", "= self.transformer(input, input_len) # B x seq_len x h if", "table.align[\"Layer Name\"] = \"l\" table.align[\"Output Shape\"] = \"r\" table.align[\"Param #\"]", "layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1] return memory_bank, layer_outputs class Decoder(nn.Module):", "heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) # To accomplish eq.", "self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos,", "weights here if needed if params['src_mask'] is not None: mask", "def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq,", "params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\": None} enc_outputs", "batch_size x num_heads x 1 x src_len std_attn = torch.stack(attns[\"std\"],", "prettytable import PrettyTable from c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings import", "d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) 
self.transformer_d = TransformerDecoder( num_layers=args.nlayers,", "import PrettyTable from c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings import Embeddings", "mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt,", "return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def init_decoder(self, src_lens,", "from c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from", "= self.tgt_char_embeddings(sequence_char) # B x P x f if word_rep", "Input: - code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len, max_word_len)``", "d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) # To accomplish eq. 19 -", "tgt_emb): max_mem_len = memory_bank[0].shape[1] \\ if isinstance(memory_bank, list) else memory_bank.shape[1]", "None # attentions: batch_size x tgt_len x num_heads x src_len", "copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars = None if", "tgt_words.expand(batch_size).unsqueeze(1) # B x 1 tgt_chars = None if self.embedder.use_tgt_char:", "seq_len x nlayers x h layer_scores = self.layer_weights(output).squeeze(3) layer_scores =", "or char embedding options should be True assert args.use_src_word or", "dec_log_probs = [] acc_dec_outs = [] max_mem_len = params['memory_bank'][0].shape[1] \\", "from prettytable import PrettyTable from c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings", "``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` - code_len: ``(batch_size)``", "for idx in range(params['max_len'] + 1): tgt = self.embedder(tgt_words, tgt_chars,", "state_dict = torch.load( args.reload_decoder_state, map_location=lambda 
storage, loc: storage ) self.decoder.load_state_dict(state_dict)", "params['memory_bank'] = memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] = code_len params['source_vocab']", "[] dec_log_probs = [] acc_dec_outs = [] max_mem_len = params['memory_bank'][0].shape[1]", "sequence_char, sequence_type=None, mode='encoder', step=None): if mode == 'encoder': word_rep =", "x seq_len x h # embed and encode the target", "in enumerate(t): widx = w[0].item() if widx < len(tgt_dict): words.append(tgt_dict[widx])", "= args.copy_attn if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator =", "self.enc_input_size = 0 self.dec_input_size = 0 # at least one", "args.filter_size, args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2)", "use_cuda: tgt_words = tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x", "# B x seq_len x h params = dict() params['memory_bank']", "tgt_emb, memory_bank, state) class Transformer(nn.Module): \"\"\"Module that writes an answer", "that writes an answer for the question given a passage.\"\"\"", "= kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict'] =", "from c2nl.modules.embeddings import Embeddings from c2nl.modules.highway import Highway from c2nl.encoders.transformer", "tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars", "c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask class Embedder(nn.Module):", "copy_info = [] attentions = [] dec_log_probs = [] acc_dec_outs", "decoder_outputs, attns def forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len =", "args, input_size): super(Encoder, 
self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k,", "Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if", "scores = scores[:, :-1, :].contiguous() ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(),", "kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words'] = code_word_rep", "Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) scores", "Encoder(nn.Module): def __init__(self, args, input_size): super(Encoder, self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers,", "self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss = dict()", "forward(self, sequence, sequence_char, sequence_type=None, mode='encoder', step=None): if mode == 'encoder':", "= blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0,", "# B x P x d if self.use_src_char: char_rep =", "for w in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w]", "code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): batch_size", "as nn import torch.nn.functional as f from prettytable import PrettyTable", "dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction = prediction.squeeze(1) for", "x P x f if word_rep is None: word_rep =", "args.use_tgt_word if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size +=", "layer_wise_outputs params['src_len'] = code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map'] 
= src_map", "self.src_char_embeddings(sequence_char) # B x P x f if word_rep is", "- code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size,", "sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2) self.use_type = args.use_code_type if", "#\"] = \"r\" for name, parameters in self.named_parameters(): if parameters.requires_grad:", "return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs) def __tens2sent(self,", "= self.type_embeddings(sequence_type) word_rep = word_rep + type_rep if self.src_pos_emb and", "encode the target sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask", "be True assert args.use_src_word or args.use_src_char assert args.use_tgt_word or args.use_tgt_char", "d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) self.transformer_d =", "params['source_vocab'] = kwargs['source_vocab'] params['src_map'] = src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill']", "torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if use_cuda: blank_b = blank_b.cuda() fill_b", "\\ if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len)", "= layer_outputs[-1] return memory_bank, layer_outputs class Decoder(nn.Module): def __init__(self, args,", "raise ValueError('Unknown embedder mode!') word_rep = self.dropout(word_rep) return word_rep class", "ml_loss = ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] =", "B x seq_len x h params = dict() params['memory_bank'] =", "dim=1) if attentions else None return { 'predictions': dec_preds, 'copy_info':", "+= args.emsize if self.use_tgt_word: self.tgt_word_embeddings 
= Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size", "src_map, alignment, **kwargs): word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank,", "d_ff=args.d_ff, dropout=args.trans_drop ) # To accomplish eq. 19 - 21", "self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c = TransformerDecoder(", "= tgt_seq[:, 1:].contiguous() if self._copy: # copy_score: batch_size, tgt_len, src_len", "self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion", "args.use_src_char assert args.use_tgt_word or args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word =", "end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep", "self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char, sequence_type=None, mode='encoder',", "alignment[:, 1:].contiguous(), target) else: scores = self.generator(decoder_outputs) # `batch x", "self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class Transformer(nn.Module): \"\"\"Module that writes an", "args.use_tgt_word or args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word = args.use_tgt_word if", "self.use_tgt_word = args.use_tgt_word if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD)", "= None if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in", "return loss def forward(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep,", "``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len, max_word_len)``", "code_len, src_map, 
alignment, **kwargs): word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder')", "return sum(p.numel() for p in self.parameters() if p.requires_grad) def count_encoder_parameters(self):", "if self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep = word_rep + type_rep", "B x P x d if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char)", "max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` - code_len: ``(batch_size)`` -", "x seq_len x nlayers x h layer_scores = self.layer_weights(output).squeeze(3) layer_scores", "needed if kwargs['code_mask_rep'] is not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) #", "import sequence_mask class Embedder(nn.Module): def __init__(self, args): super(Embedder, self).__init__() self.enc_input_size", "pos_rep elif mode == 'decoder': word_rep = None if self.use_tgt_word:", "def init_decoder(self, src_lens, max_src_len): if self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len)", "= [] acc_dec_outs = [] max_mem_len = params['memory_bank'][0].shape[1] \\ if", "def __init__(self, args): super(Embedder, self).__init__() self.enc_input_size = 0 self.dec_input_size =", "= params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states =", "if needed if params['src_mask'] is not None: mask = params['src_mask'].byte().unsqueeze(1)", "= nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2,", "__init__(self, args): super(Embedder, self).__init__() self.enc_input_size = 0 self.dec_input_size = 0", "d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state: state_dict = torch.load(", "attns = self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _", "= [] 
attentions = [] dec_log_probs = [] acc_dec_outs =", "summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if", "None: mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf'))", "code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B", "`batch x tgt_len x vocab_size` scores = scores[:, :-1, :].contiguous()", "scores = self.copy_generator(decoder_outputs, attn_copy, src_map) scores = scores[:, :-1, :].contiguous()", "self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len):", "fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction", "embed and encode the target sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep,", "P x d+f if self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep =", "~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank layer_wise_dec_out,", "+ pos_rep elif mode == 'decoder': word_rep = None if", "attentions } def count_parameters(self): return sum(p.numel() for p in self.parameters()", "= src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank'] =", "2) # B x P x d+f word_rep = self.tgt_highway_net(word_rep)", "dec_preds, attentions, copy_info, dec_log_probs def decode(self, code_word_rep, code_char_rep, code_type_rep, code_len,", "we split decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k,", "super(Decoder, self).__init__() self.input_size = input_size self.split_decoder = args.split_decoder and args.copy_attn", 
"attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs =", "copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy,", "char embedding options should be True assert args.use_src_word or args.use_src_char", "\"\"\" Input: - code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len,", "self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words,", "= nn.Linear(input_size, 1, bias=False) def count_parameters(self): return self.transformer.count_parameters() def forward(self,", "pos_enc = torch.LongTensor([step]) # used in inference time pos_enc =", "list) else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\":", "= None if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x", "nn.Sigmoid() ) self.fusion_gate = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.ReLU()", "self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn weights here if", "dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\": None} enc_outputs =", "heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state: state_dict", "type_rep = self.type_embeddings(sequence_type) word_rep = word_rep + type_rep if self.src_pos_emb", "mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len", "+1 for <EOS> token for idx in range(params['max_len'] + 1):", "__tens2sent(self, t, tgt_dict, src_vocabs): words = [] for idx, w", "2, self.input_size), 
nn.Sigmoid() ) self.fusion_gate = nn.Sequential( nn.Linear(self.input_size * 2,", "1e-10) else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction, dim=1) if", "num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) # To", "tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info =", "max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size,", "- summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` -", "import torch import torch.nn as nn import torch.nn.functional as f", "self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid() ) self.fusion_gate", "attentions: batch_size x tgt_len x num_heads x src_len attentions =", "summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs) else: return self.decode(code_word_rep, code_char_rep,", "\"Output Shape\", \"Param #\"] table.align[\"Layer Name\"] = \"l\" table.align[\"Output Shape\"]", "= args.split_decoder and args.copy_attn if self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf), we", "self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize self.use_src_char", "input_size): super(Decoder, self).__init__() self.input_size = input_size self.split_decoder = args.split_decoder and", "nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char, sequence_type=None, mode='encoder', step=None): if mode", "tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if tgt_words is", "the target sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep, 
mode='decoder') summ_pad_mask =", "pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step]) # used", "= w[0].item() if widx < len(tgt_dict): words.append(tgt_dict[widx]) else: widx =", "attns = self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss", "copy_attn weights here if needed if kwargs['code_mask_rep'] is not None:", "``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if self.training: return self._run_forward_ml(code_word_rep, code_char_rep,", "args.nlayers self.embedder = Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder =", "self.tgt_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep else: raise ValueError('Unknown embedder", "attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy)", "mask copy_attn weights here if needed if params['src_mask'] is not", "params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words'] = code_word_rep dec_preds,", "char_rep else: word_rep = torch.cat((word_rep, char_rep), 2) # B x", "if self.use_src_char: assert len(args.filter_size) == len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize,", "nn.Linear(self.input_size * 2, self.input_size), nn.ReLU() ) else: self.transformer = TransformerDecoder(", "= kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] =", "decode(self, code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs): word_rep =", "log_prob = torch.log(tgt_prob + 1e-20) elif choice == 'sample': tgt,", "fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction = 
self.generator(decoder_outputs.squeeze(1))", "is not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable.", "tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for w in words]", "embed and encode the source sequence code_rep = self.embedder(code_word_rep, code_char_rep,", "= words.unsqueeze(1) return dec_preds, attentions, copy_info, dec_log_probs def decode(self, code_word_rep,", "self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if self._copy: self.copy_attn", "= params['memory_bank'].is_cuda if tgt_words is None: tgt_words = torch.LongTensor([constants.BOS]) if", "def forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1] \\", "kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean() return", "if tgt_words is None: tgt_words = torch.LongTensor([constants.BOS]) if use_cuda: tgt_words", "= Highway(self.dec_input_size, num_layers=2) self.use_type = args.use_code_type if self.use_type: self.type_embeddings =", "state, step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns def forward(self, memory_bank, memory_len,", "else: decoder_outputs, attns = self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage)", "super(Embedder, self).__init__() self.enc_input_size = 0 self.dec_input_size = 0 # at", "None if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words]", "is None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step])", "word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d if", "and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: 
self.tgt_pos_embeddings =", "torch.LongTensor([constants.BOS]) if use_cuda: tgt_words = tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) #", "= torch.stack(copy_info, dim=1) if copy_info else None # attentions: batch_size", "if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x", "tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if", "state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1],", "self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs", "attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds, dim=1)", "# embed and encode the source sequence code_rep = self.embedder(code_word_rep,", "= kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words'] = code_word_rep dec_preds, attentions,", "torch.load( args.reload_decoder_state, map_location=lambda storage, loc: storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self):", "= self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d if self.use_src_char:", "params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words']", "import constants from c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator import CopyGenerator,", "args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize self.use_src_char = args.use_src_char self.use_tgt_char =", "self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d else:", "table = PrettyTable() table.field_names = [\"Layer Name\", \"Output 
Shape\", \"Param", "None if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P", "num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if", "isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns =", "params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] = code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map']", "enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank layer_wise_dec_out, attns =", "dim=-1) decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs, attns = self.transformer(tgt_words, tgt_emb,", "None if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars", "dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns: # std_attn: batch_size x num_heads", "0 for v in args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings", "kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict']", "tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns def forward(self,", "memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state)", "= pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc)", "summ_len, tgt_seq, src_map, alignment, **kwargs) else: return self.decode(code_word_rep, code_char_rep, code_type_rep,", "x src_len std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask", "split 
decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v,", "self.input_size), nn.Sigmoid() ) self.fusion_gate = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size),", "broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs,", "x num_heads x 1 x src_len std_attn = torch.stack(attns[\"std\"], dim=1)", "num_layers=2) self.use_type = args.use_code_type if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size)", "= input_size self.split_decoder = args.split_decoder and args.copy_attn if self.split_decoder: #", "+= sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert", "summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss = dict() target = tgt_seq[:,", "kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] = kwargs['max_len']", "num_heads x 1 x src_len std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2))", "args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state, map_location=lambda storage, loc: storage )", "args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight", "__init__(self, args, input_size): super(Decoder, self).__init__() self.input_size = input_size self.split_decoder =", "= torch.load( args.reload_decoder_state, map_location=lambda storage, loc: storage ) self.decoder.load_state_dict(state_dict) def", "pos_rep = self.src_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep elif mode", "elif mode == 'decoder': word_rep = None if self.use_tgt_word: word_rep", "def forward(self, input, input_len): 
layer_outputs, _ = self.transformer(input, input_len) #", "tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states,", "= [params['tgt_dict'][w] for w in words] words = torch.Tensor(words).type_as(tgt) tgt_words", "len(args.filter_size) == len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size", "if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x", "import GlobalAttention from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import", "- ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if self.training: return self._run_forward_ml(code_word_rep,", "self.layer_wise_attn = args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if", "Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn", "decode(self, tgt_words, tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None): if self.split_decoder: copier_out,", "tgt_len, src_len _, copy_score, _ = self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False)", "self).__init__() self.input_size = input_size self.split_decoder = args.split_decoder and args.copy_attn if", "Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size,", "x num_heads x src_len attentions = torch.stack(attentions, dim=1) if attentions", "torch.cat((word_rep, char_rep), 2) # B x P x d+f word_rep", "params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if use_cuda: blank_b", "torch.max(prediction, dim=1, keepdim=True) 
log_prob = torch.log(tgt_prob + 1e-20) elif choice", "layer_wise_coverage=None): if self.split_decoder: copier_out, attns = self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0],", "word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep = word_rep", "seq_len x h if self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2) #", "map_location=lambda storage, loc: storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder:", "# B x P x d+f if self.use_type: type_rep =", "in words] words = torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return dec_preds,", "p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters() def", "max_doc_len, max_word_len)`` - code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)`` -", "- code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` -", "softmax_weights=False) # mask copy_attn weights here if needed if params['src_mask']", "= f.softmax(prediction, dim=1) if choice == 'greedy': tgt_prob, tgt =", "if copy_info else None # attentions: batch_size x tgt_len x", "tgt_dict, src_vocabs): words = [] for idx, w in enumerate(t):", "code_len, summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1] loss = dict() target", "sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1))", "= code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map'] = src_map params['src_mask'] =", "of the class.\"\"\" super(Transformer, self).__init__() self.name = 'Transformer' if len(args.max_relative_pos)", "num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, 
dropout=args.trans_drop ) self.transformer_d", "attns = self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs,", "def __generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda =", "dim=1) if copy_info else None # attentions: batch_size x tgt_len", "from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask class", "Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings =", "= \"r\" table.align[\"Param #\"] = \"r\" for name, parameters in", "_ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn weights", "TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters import constants from", "= params['layer_wise_outputs'] if self.layer_wise_attn \\ else params['memory_bank'] # +1 for", "tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars =", "code_type_rep, code_len, src_map, alignment, **kwargs) def __tens2sent(self, t, tgt_dict, src_vocabs):", "torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask = tgt.gt(len(params['tgt_dict']) - 1)", "self.use_src_char: assert len(args.filter_size) == len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size,", "__init__(self, args, tgt_dict): \"\"\"\"Constructor of the class.\"\"\" super(Transformer, self).__init__() self.name", "= self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size,", "pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep else: 
raise", "#\"] table.align[\"Layer Name\"] = \"l\" table.align[\"Output Shape\"] = \"r\" table.align[\"Param", "memory_len, tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1] \\ if isinstance(memory_bank, list)", "= pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc)", "std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask = tgt.gt(len(params['tgt_dict'])", "is None: word_rep = char_rep else: word_rep = torch.cat((word_rep, char_rep),", "pos_rep else: raise ValueError('Unknown embedder mode!') word_rep = self.dropout(word_rep) return", "params['src_len'] = code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map'] = src_map params['src_mask']", "None: tgt_words = torch.LongTensor([constants.BOS]) if use_cuda: tgt_words = tgt_words.cuda() tgt_words", "P x d+f if self.tgt_pos_emb: if step is None: pos_enc", "else: self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len,", ") self.fusion_gate = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.ReLU() )", "{ 'predictions': dec_preds, 'copy_info': copy_info, 'memory_bank': memory_bank, 'attentions': attentions }", "broadcastable. 
copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs,", "= args.use_code_type if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb =", "table.field_names = [\"Layer Name\", \"Output Shape\", \"Param #\"] table.align[\"Layer Name\"]", "init_decoder(self, src_lens, max_src_len): if self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d", "input, input_len): layer_outputs, _ = self.transformer(input, input_len) # B x", "if choice == 'greedy': tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True)", "self.use_src_char = args.use_src_char self.use_tgt_char = args.use_tgt_char if self.use_src_char: assert len(args.filter_size)", "memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns def forward(self, memory_bank,", "len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size", "\"\"\" if self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep,", "super(Encoder, self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff,", "else: widx = widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def", "self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars = None if self.embedder.use_tgt_char: tgt_chars =", "dropout=args.trans_drop ) self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v,", "``(batch_size, max_len)`` Output: - ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if", "class.\"\"\" super(Transformer, self).__init__() 
self.name = 'Transformer' if len(args.max_relative_pos) != args.nlayers:", "self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2) self.use_type = args.use_code_type if self.use_type: self.type_embeddings", "eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size", "range(params['max_len'] + 1): tgt = self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask", "args, input_size): super(Decoder, self).__init__() self.input_size = input_size self.split_decoder = args.split_decoder", "= args.use_src_char self.use_tgt_char = args.use_tgt_char if self.use_src_char: assert len(args.filter_size) ==", "output = torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers", "self.encoder = Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn =", "- summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` - summ_len: ``(batch_size)`` - tgt_seq:", "state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len) return state_c,", "TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) #", "x f if word_rep is None: word_rep = char_rep else:", "softmax_weights=False) # mask copy_attn weights here if needed if kwargs['code_mask_rep']", "__generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda", "attentions.append(std_attn.squeeze(2)) if self._copy: mask = tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words", "None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf'))", "= params['src_mask'].byte().unsqueeze(1) # Make it broadcastable. 
copy_score.data.masked_fill_(mask, -float('inf')) attn_copy =", "if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb:", "'greedy': tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True) log_prob = torch.log(tgt_prob", "= memory_bank[0].shape[1] \\ if isinstance(memory_bank, list) else memory_bank.shape[1] state =", "= dict() target = tgt_seq[:, 1:].contiguous() if self._copy: # copy_score:", "\"\"\"Module that writes an answer for the question given a", "widx < len(tgt_dict): words.append(tgt_dict[widx]) else: widx = widx - len(tgt_dict)", "words.unsqueeze(1) return dec_preds, attentions, copy_info, dec_log_probs def decode(self, code_word_rep, code_char_rep,", "dec_preds = torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info, dim=1) if copy_info", "super(Transformer, self).__init__() self.name = 'Transformer' if len(args.max_relative_pos) != args.nlayers: assert", "word_rep = torch.cat((word_rep, char_rep), 2) # B x P x", "here if needed if params['src_mask'] is not None: mask =", "if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size self.generator.weight =", "assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns: # std_attn:", "enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy:", "self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss", "= layer_wise_outputs params['src_len'] = code_len params['source_vocab'] = kwargs['source_vocab'] params['src_map'] =", "use_cuda = params['memory_bank'].is_cuda if tgt_words is None: tgt_words = torch.LongTensor([constants.BOS])", "import CharEmbedding from 
c2nl.modules.embeddings import Embeddings from c2nl.modules.highway import Highway", "constants.PAD) self.enc_input_size += args.emsize if self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size,", "class Decoder(nn.Module): def __init__(self, args, input_size): super(Decoder, self).__init__() self.input_size =", "GlobalAttention from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask", "in self.parameters() if p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters() def count_decoder_parameters(self):", "= self.tgt_highway_net(word_rep) # B x P x d+f if self.tgt_pos_emb:", "for the question given a passage.\"\"\" def __init__(self, args, tgt_dict):", "loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean() return loss def forward(self, code_word_rep,", "= CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net", "assert args.use_src_word or args.use_src_char assert args.use_tgt_word or args.use_tgt_char self.use_src_word =", ") self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff,", "= CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion", "self.enc_input_size += args.emsize if self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD)", "words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size =", "prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction = prediction.squeeze(1) for b", "= self.generator(decoder_outputs.squeeze(1)) 
prediction = f.softmax(prediction, dim=1) if choice == 'greedy':", "if self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def", "tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1)", "= CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net", "dim=2) # B x seq_len x nlayers x h layer_scores", "the source sequence code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank,", "self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize self.use_src_char =", "memory_lengths=code_len, softmax_weights=False) # mask copy_attn weights here if needed if", "max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` - summ_len: ``(batch_size)`` -", "word_rep = self.tgt_highway_net(word_rep) # B x P x d+f if", "= scores[:, :-1, :].contiguous() # `batch x tgt_len - 1", "torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step]) # used in inference", "dec_out, _ = self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step) f_t =", "return self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self): table =", "Highway from c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder from", "memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] = code_len params['source_vocab'] = kwargs['source_vocab']", "len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) 
self.dec_input_size += sum(list(map(int,", "nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.ReLU() ) else: self.transformer =", "attns = {\"coverage\": None} enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \\", "self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len) return", "B x 1 tgt_chars = None if self.embedder.use_tgt_char: tgt_chars =", "d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state:", "dim=1, keepdim=True) log_prob = torch.log(tgt_prob + 1e-20) elif choice ==", "src_len attentions = torch.stack(attentions, dim=1) if attentions else None return", "if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars =", "blank_b = torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if use_cuda: blank_b =", "dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input)", "= self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d else: return self.transformer.init_state(src_lens, max_src_len)", "self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb =", "self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep,", "else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns: #", "import Embeddings from c2nl.modules.highway import Highway from c2nl.encoders.transformer import TransformerEncoder", "self.transformer.count_parameters() def 
forward(self, input, input_len): layer_outputs, _ = self.transformer(input, input_len)", "tgt = torch.max(prediction, dim=1, keepdim=True) log_prob = torch.log(tgt_prob + 1e-20)", "= torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return dec_preds, attentions, copy_info, dec_log_probs", "= Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size)", "CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask class Embedder(nn.Module): def __init__(self, args):", "if needed if kwargs['code_mask_rep'] is not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1)", "words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for w in", "attns def forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1]", "self.copy_generator(decoder_outputs, attn_copy, src_map) scores = scores[:, :-1, :].contiguous() ml_loss =", "memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words, tgt_emb, memory_bank,", "= None if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0)", "x d if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) # B x", "f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction = prediction.squeeze(1)", "= torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if use_cuda: blank_b = blank_b.cuda()", "src_map) scores = scores[:, :-1, :].contiguous() ml_loss = self.criterion(scores, alignment[:,", "def layer_wise_parameters(self): table = PrettyTable() table.field_names = [\"Layer Name\", \"Output", "args.split_decoder and args.copy_attn if self.split_decoder: # Following 
(https://arxiv.org/pdf/1808.07913.pdf), we split", "<EOS> token for idx in range(params['max_len'] + 1): tgt =", "char_rep = self.src_char_embeddings(sequence_char) # B x P x f if", "target = tgt_seq[:, 1:].contiguous() if self._copy: # copy_score: batch_size, tgt_len,", "= kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words'] =", "self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment,", "storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder: return self.transformer_c.count_parameters() +", "len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int,", "tgt_words = tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1", "summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size, max_len)`` Output: - ``(batch_size, P_LEN)``,", "code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x", "if \"std\" in attns: # std_attn: batch_size x num_heads x", "= GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion =", "x P x d+f word_rep = self.src_highway_net(word_rep) # B x", "self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1))", "self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize if self.use_tgt_word:", "def count_parameters(self): return self.transformer.count_parameters() def forward(self, input, input_len): layer_outputs, _", "# B x P x d+f if self.tgt_pos_emb: if 
step", "return self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words, tgt_emb, memory_bank, state, step=None,", "torch.nn as nn import torch.nn.functional as f from prettytable import", "idx in range(params['max_len'] + 1): tgt = self.embedder(tgt_words, tgt_chars, mode='decoder',", "* 2, self.input_size), nn.ReLU() ) else: self.transformer = TransformerDecoder( num_layers=args.nlayers,", "args.use_src_word or args.use_src_char assert args.use_tgt_word or args.use_tgt_char self.use_src_word = args.use_src_word", "TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop )", "the question given a passage.\"\"\" def __init__(self, args, tgt_dict): \"\"\"\"Constructor", "forward(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map,", "= 0 # at least one of word or char", "= self.src_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep elif mode ==", "= self.tgt_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep else: raise ValueError('Unknown", "from c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings import Embeddings from c2nl.modules.highway", "= self.encoder(word_rep, code_len) # B x seq_len x h params", "x P x d+f word_rep = self.tgt_highway_net(word_rep) # B x", "= tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1 tgt_chars", "= self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ =", "ml_loss.div((summ_len - 1).float()).mean() return loss def forward(self, code_word_rep, code_char_rep, code_type_rep,", "= tgt_words.expand(batch_size).unsqueeze(1) # B x 1 tgt_chars = None if", "is not None: mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable.", "count_encoder_parameters(self): return 
self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self): table", "max_mem_len = memory_bank[0].shape[1] \\ if isinstance(memory_bank, list) else memory_bank.shape[1] state", "if self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len)", "attn_copy, src_map) scores = scores[:, :-1, :].contiguous() ml_loss = self.criterion(scores,", "# Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers,", "state_d else: return self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words, tgt_emb, memory_bank,", "args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert", "# `batch x tgt_len - 1 x vocab_size` ml_loss =", "tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w", "args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size,", "c2nl.modules.embeddings import Embeddings from c2nl.modules.highway import Highway from c2nl.encoders.transformer import", "args): super(Embedder, self).__init__() self.enc_input_size = 0 self.dec_input_size = 0 #", "pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep", "params['src_mask'] is not None: mask = params['src_mask'].byte().unsqueeze(1) # Make it", "== 'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1))", "loc: storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder: return 
self.transformer_c.count_parameters()", "given a passage.\"\"\" def __init__(self, args, tgt_dict): \"\"\"\"Constructor of the", "= self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class Transformer(nn.Module):", "c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings import Embeddings from c2nl.modules.highway import", "params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn weights here if needed", "x d+f word_rep = self.src_highway_net(word_rep) # B x P x", "if self.layer_wise_attn else memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len, summ_pad_mask,", "self.split_decoder = args.split_decoder and args.copy_attn if self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf),", "loss def forward(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len,", "self._copy: # copy_score: batch_size, tgt_len, src_len _, copy_score, _ =", "summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` - summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size,", "class Transformer(nn.Module): \"\"\"Module that writes an answer for the question", "# B x P x d+f word_rep = self.src_highway_net(word_rep) #", "params['blank'] = kwargs['blank'] params['src_dict'] = kwargs['src_dict'] params['tgt_dict'] = kwargs['tgt_dict'] params['max_len']", "f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank =", "_, copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask", "table.align[\"Param #\"] = \"r\" for name, parameters in self.named_parameters(): if", "self.name = 'Transformer' if len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos) ==", "batch_size, tgt_len, src_len _, copy_score, _ = 
self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len,", "= self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn weights here", "ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss =", "{\"coverage\": None} enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \\ else params['memory_bank']", "seq_len x h # embed and encode the target sequence", "if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize", "x d+f word_rep = self.tgt_highway_net(word_rep) # B x P x", "= torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs,", "= self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False) # mask copy_attn weights here", "self.dropout(word_rep) return word_rep class Encoder(nn.Module): def __init__(self, args, input_size): super(Encoder,", "self.fusion_gate = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.ReLU() ) else:", "dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds,", "loss = dict() target = tgt_seq[:, 1:].contiguous() if self._copy: #", "if self._copy: # copy_score: batch_size, tgt_len, src_len _, copy_score, _", "1 x src_len std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy:", "x h if self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2) # B", "loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean() return loss", "= self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1]", "def 
count_encoder_parameters(self): return self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self):", "_, copy_score, _ = self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False) # mask", ":].contiguous() ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(), target) else: scores =", "h layer_scores = self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1) memory_bank =", "state_d = self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d else: return self.transformer.init_state(src_lens,", "summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len, max_word_len)`` - summ_len:", "word_rep = char_rep else: word_rep = torch.cat((word_rep, char_rep), 2) #", "inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda()", "args.use_src_word self.use_tgt_word = args.use_tgt_word if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size,", "None if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P", "CopyGenerator(self.decoder.input_size, tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion =", "params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars =", "dim=-1) memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1]", "target) else: scores = self.generator(decoder_outputs) # `batch x tgt_len x", "d+f word_rep = self.tgt_highway_net(word_rep) # B x P x d+f", "pos_enc = pos_enc.cuda() pos_rep = self.src_pos_embeddings(pos_enc) word_rep = word_rep +", "x h # embed and encode the target 
sequence summ_emb", "constants.PAD) self.dec_input_size += args.emsize self.use_src_char = args.use_src_char self.use_tgt_char = args.use_tgt_char", "layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs = layer_wise_dec_out[-1]", "kwargs['code_mask_rep'] is not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it", "self.generator(decoder_outputs) # `batch x tgt_len x vocab_size` scores = scores[:,", "if use_cuda: blank_b = blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b,", "= f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction =", "\"std\" in attns: # std_attn: batch_size x num_heads x 1", "copy_score, _ = self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False) # mask copy_attn", "memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1] return", "c2nl.modules.highway import Highway from c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer import", "self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy =", "else memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank,", "= scores[:, :-1, :].contiguous() ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(), target)", "params['layer_wise_outputs'] if self.layer_wise_attn \\ else params['memory_bank'] # +1 for <EOS>", "= self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in", "= self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns", 
"tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info = [] attentions = []", "layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x h", "self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else:", "self).__init__() self.name = 'Transformer' if len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos)", "step is None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc =", "- 1 x vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss", "= args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1, bias=False) def", "params['src_map'] = src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank']", "tgt_len x num_heads x src_len attentions = torch.stack(attentions, dim=1) if", "nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size", "self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos = all(v == 0 for v", "= tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info = [] attentions =", "summ_len, tgt_seq, src_map, alignment, **kwargs): \"\"\" Input: - code_word_rep: ``(batch_size,", "choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if tgt_words", "choice == 'greedy': tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True) log_prob", "False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns: # std_attn: batch_size", "len(args.max_relative_pos) == 1 args.max_relative_pos = args.max_relative_pos * args.nlayers self.embedder =", "# mask copy_attn weights here if needed if params['src_mask'] is", "= 
tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab'])", "word or char embedding options should be True assert args.use_src_word", "= memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] = code_len params['source_vocab'] =", "assert len(args.filter_size) == len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters)", "if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep =", "copier_out, attns = self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out,", "layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step) f_t", "if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) self.dropout =", "from c2nl.modules.highway import Highway from c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer", "_ = self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds, dim=1) copy_info =", "= prediction.squeeze(1) for b in range(prediction.size(0)): if params['blank'][b]: blank_b =", "state) class Transformer(nn.Module): \"\"\"Module that writes an answer for the", "self.src_pos_emb and self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1])", "scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss =", "decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff,", "args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, 
num_layers=2) self.use_type = args.use_code_type if self.use_type:", "nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb) def forward(self, sequence,", "'decoder': word_rep = None if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) #", "tgt_seq, src_map, alignment, **kwargs): batch_size = code_len.size(0) # embed and", "word_rep = None if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B", "= self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns", "Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator", "widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self, params, choice='greedy',", "dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights =", "self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size,", "self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if self.use_tgt_char:", "self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size,", "self.type_embeddings(sequence_type) word_rep = word_rep + type_rep if self.src_pos_emb and self.no_relative_pos:", "an answer for the question given a passage.\"\"\" def __init__(self,", "True assert args.use_src_word or args.use_src_char assert args.use_tgt_word or args.use_tgt_char self.use_src_word", "args.nlayers: assert 
len(args.max_relative_pos) == 1 args.max_relative_pos = args.max_relative_pos * args.nlayers", "params['max_len'] = kwargs['max_len'] params['src_words'] = code_word_rep dec_preds, attentions, copy_info, _", "and self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if", "= self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out],", "f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy, src_map) scores = scores[:,", "and encode the target sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep, mode='decoder')", "else: self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff,", "= code_len.size(0) # embed and encode the source sequence code_rep", "words = [] for idx, w in enumerate(t): widx =", "self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize if", "Name\"] = \"l\" table.align[\"Output Shape\"] = \"r\" table.align[\"Param #\"] =", "- 1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars =", "Embeddings(args.emsize, args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize if self.use_tgt_word: self.tgt_word_embeddings =", "\"\"\"\"Constructor of the class.\"\"\" super(Transformer, self).__init__() self.name = 'Transformer' if", "CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net =", "[params['tgt_dict'][w] for w in words] words = torch.Tensor(words).type_as(tgt) tgt_words =", "self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = 
args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb", "self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize", "= TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop )", "x d+f if self.tgt_pos_emb: if step is None: pos_enc =", "1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars = None", "else: raise ValueError('Unknown embedder mode!') word_rep = self.dropout(word_rep) return word_rep", "answer for the question given a passage.\"\"\" def __init__(self, args,", "src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank'] = kwargs['blank']", "x tgt_len - 1 x vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)),", "assert len(args.max_relative_pos) == 1 args.max_relative_pos = args.max_relative_pos * args.nlayers self.embedder", "= f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy, src_map) scores =", "sum(p.numel() for p in self.parameters() if p.requires_grad) def count_encoder_parameters(self): return", "word_rep = None if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B", "sequence_mask class Embedder(nn.Module): def __init__(self, args): super(Embedder, self).__init__() self.enc_input_size =", "self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if self._copy: self.copy_attn =", "P x d+f word_rep = self.tgt_highway_net(word_rep) # B x P", "\"r\" for name, parameters in self.named_parameters(): if parameters.requires_grad: table.add_row([name, str(list(parameters.shape)),", "x tgt_len x num_heads x src_len attentions = torch.stack(attentions, dim=1)", "return { 'predictions': dec_preds, 'copy_info': 
copy_info, 'memory_bank': memory_bank, 'attentions': attentions", "= self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info,", "= self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\": None} enc_outputs = params['layer_wise_outputs']", "= tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx,", "= args.max_relative_pos * args.nlayers self.embedder = Embedder(args) self.encoder = Encoder(args,", "or args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word = args.use_tgt_word if self.use_src_word:", "import Highway from c2nl.encoders.transformer import TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder", "= torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds", "state_c, state_d else: return self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words, tgt_emb,", "# attentions: batch_size x tgt_len x num_heads x src_len attentions", "CharEmbedding from c2nl.modules.embeddings import Embeddings from c2nl.modules.highway import Highway from", "19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size *", "code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs) else:", "code_len) # B x seq_len x h # embed and", "list) else memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb,", "if params['src_mask'] is not None: mask = params['src_mask'].byte().unsqueeze(1) # Make", "self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class Transformer(nn.Module): \"\"\"Module", "self.embedder.use_tgt_char: tgt_chars = 
[params['tgt_dict'].word_to_char_ids(w).tolist() for w in words] tgt_chars =", "`batch x tgt_len - 1 x vocab_size` ml_loss = self.criterion(scores.view(-1,", "d+f if self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep = word_rep +", "= [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words", "batch_size = code_len.size(0) # embed and encode the source sequence", "needed if params['src_mask'] is not None: mask = params['src_mask'].byte().unsqueeze(1) #", "tgt_chars, mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask,", "dec_preds = [] copy_info = [] attentions = [] dec_log_probs", "if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if use_cuda:", "forward(self, input, input_len): layer_outputs, _ = self.transformer(input, input_len) # B", ":].contiguous() # `batch x tgt_len - 1 x vocab_size` ml_loss", "alignment, **kwargs): batch_size = code_len.size(0) # embed and encode the", "sequence_type=None, mode='encoder', step=None): if mode == 'encoder': word_rep = None", "input_size): super(Encoder, self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v,", "summ_emb = self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs", "if mode == 'encoder': word_rep = None if self.use_src_word: word_rep", ":-1, :].contiguous() ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(), target) else: scores", "constants from c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion", "alignment, **kwargs): word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') 
memory_bank, layer_wise_outputs", "self.dropout = nn.Dropout(args.dropout_emb) def forward(self, sequence, sequence_char, sequence_type=None, mode='encoder', step=None):", "alignment, **kwargs) def __tens2sent(self, t, tgt_dict, src_vocabs): words = []", "def __init__(self, args, input_size): super(Decoder, self).__init__() self.input_size = input_size self.split_decoder", ") if args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state, map_location=lambda storage, loc:", "} def count_parameters(self): return sum(p.numel() for p in self.parameters() if", "c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters import constants from c2nl.modules.global_attention import", "+= sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2) self.use_type = args.use_code_type", "if self.layer_wise_attn \\ else params['memory_bank'] # +1 for <EOS> token", "= ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean() return loss def", "code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs):", "w in enumerate(t): widx = w[0].item() if widx < len(tgt_dict):", "x 1 x src_len std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if", "21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size),", "coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state, map_location=lambda", "mask copy_attn weights here if needed if kwargs['code_mask_rep'] is not", "'predictions': dec_preds, 'copy_info': copy_info, 'memory_bank': memory_bank, 'attentions': attentions } def", "- 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size * 2,", "code_word_rep dec_preds, attentions, copy_info, _ = 
self.__generate_sequence(params, choice='greedy') dec_preds =", "= PrettyTable() table.field_names = [\"Layer Name\", \"Output Shape\", \"Param #\"]", "max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank layer_wise_dec_out, attns", "x nlayers x h layer_scores = self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores,", "# B x P x d+f word_rep = self.tgt_highway_net(word_rep) #", "(https://arxiv.org/pdf/1808.07913.pdf), we split decoder self.transformer_c = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head,", "= nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos", "if word_rep is None: word_rep = char_rep else: word_rep =", "src_lens, max_src_len): if self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d =", "import TransformerEncoder from c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters import constants", "= self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d if self.use_tgt_char:", "= torch.LongTensor([constants.BOS]) if use_cuda: tgt_words = tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1)", "code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs) else: return", "in range(params['max_len'] + 1): tgt = self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx)", "word_rep = word_rep + pos_rep else: raise ValueError('Unknown embedder mode!')", "for b in range(prediction.size(0)): if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b", "words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars = None if self.embedder.use_tgt_char:", "params['source_vocab']) tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for", "if 
self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize", "should be True assert args.use_src_word or args.use_src_char assert args.use_tgt_word or", "= torch.LongTensor([step]) # used in inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1])", "tgt_words, tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None): if self.split_decoder: copier_out, attns", "attentions = torch.stack(attentions, dim=1) if attentions else None return {", "code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x", "= params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars", "tgt_words is None: tgt_words = torch.LongTensor([constants.BOS]) if use_cuda: tgt_words =", "= torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask = tgt.gt(len(params['tgt_dict']) -", "* kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean()", "= all(v == 0 for v in args.max_relative_pos) if self.src_pos_emb", "args.copy_attn if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size,", "vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss", "torch.log(tgt_prob + 1e-20) elif choice == 'sample': tgt, log_prob =", "= ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1) * kwargs['example_weights']", "= word_rep + type_rep if self.src_pos_emb and self.no_relative_pos: pos_enc =", "code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): \"\"\" Input:", 
"params['memory_bank'].is_cuda if tgt_words is None: tgt_words = torch.LongTensor([constants.BOS]) if use_cuda:", "dict() target = tgt_seq[:, 1:].contiguous() if self._copy: # copy_score: batch_size,", "gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input) else:", "Name\", \"Output Shape\", \"Param #\"] table.align[\"Layer Name\"] = \"l\" table.align[\"Output", "step=step, layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns def forward(self, memory_bank, memory_len, tgt_pad_mask,", "word_rep = self.src_highway_net(word_rep) # B x P x d+f if", "Transformer(nn.Module): \"\"\"Module that writes an answer for the question given", "params = dict() params['memory_bank'] = memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len']", "dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns: # std_attn: batch_size x", "kwargs['source_vocab'] params['src_map'] = src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill'] = kwargs['fill']", "def count_parameters(self): return sum(p.numel() for p in self.parameters() if p.requires_grad)", "x vocab_size` scores = scores[:, :-1, :].contiguous() # `batch x", "= [] dec_log_probs = [] acc_dec_outs = [] max_mem_len =", "layer_scores = self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2,", "prediction = f.softmax(prediction, dim=1) if choice == 'greedy': tgt_prob, tgt", "dict() params['memory_bank'] = memory_bank params['layer_wise_outputs'] = layer_wise_outputs params['src_len'] = code_len", "[] acc_dec_outs = [] max_mem_len = params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'],", "TransformerEncoder(num_layers=args.nlayers, d_model=input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, 
use_neg_dist=args.use_neg_dist) self.use_all_enc_layers", "self.parameters() if p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters() def count_decoder_parameters(self): return", "Highway(self.dec_input_size, num_layers=2) self.use_type = args.use_code_type if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP),", "token for idx in range(params['max_len'] + 1): tgt = self.embedder(tgt_words,", "len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size", "import TransformerDecoder from c2nl.inputters import constants from c2nl.modules.global_attention import GlobalAttention", "self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction, dim=1) if choice == 'greedy': tgt_prob,", "torch.stack(copy_info, dim=1) if copy_info else None # attentions: batch_size x", "storage, loc: storage ) self.decoder.load_state_dict(state_dict) def count_parameters(self): if self.split_decoder: return", "\"Param #\"] table.align[\"Layer Name\"] = \"l\" table.align[\"Output Shape\"] = \"r\"", "# embed and encode the target sequence summ_emb = self.embedder(summ_word_rep,", "step=None, layer_wise_coverage=None): if self.split_decoder: copier_out, attns = self.transformer_c(tgt_words, tgt_emb, memory_bank,", "or args.use_src_char assert args.use_tgt_word or args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word", "assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters)", "None} enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \\ else params['memory_bank'] #", "summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): \"\"\" Input: -", "kwargs['max_len'] params['src_words'] = code_word_rep dec_preds, attentions, copy_info, _ = 
self.__generate_sequence(params,", "self.layer_wise_attn \\ else params['memory_bank'] # +1 for <EOS> token for", "= torch.max(prediction, dim=1, keepdim=True) log_prob = torch.log(tgt_prob + 1e-20) elif", "torch.LongTensor(params['fill'][b]) if use_cuda: blank_b = blank_b.cuda() fill_b = fill_b.cuda() prediction[b].index_add_(0,", "self.src_highway_net(word_rep) # B x P x d+f if self.use_type: type_rep", "args.use_tgt_char self.use_src_word = args.use_src_word self.use_tgt_word = args.use_tgt_word if self.use_src_word: self.src_word_embeddings", "sequence, sequence_char, sequence_type=None, mode='encoder', step=None): if mode == 'encoder': word_rep", "= torch.cat((word_rep, char_rep), 2) # B x P x d+f", "prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction", "'memory_bank': memory_bank, 'attentions': attentions } def count_parameters(self): return sum(p.numel() for", "self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info, dim=1)", "table.align[\"Output Shape\"] = \"r\" table.align[\"Param #\"] = \"r\" for name,", "dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _,", "tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1 tgt_chars = None", "writes an answer for the question given a passage.\"\"\" def", "vocab_size` scores = scores[:, :-1, :].contiguous() # `batch x tgt_len", "return self.transformer.count_parameters() def forward(self, input, input_len): layer_outputs, _ = self.transformer(input,", "[\"Layer Name\", \"Output Shape\", \"Param #\"] table.align[\"Layer Name\"] = \"l\"", "self.tgt_char_embeddings(sequence_char) # B x P x f if word_rep is", "ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = 
ml_loss.sum(1) *", "fill_b = torch.LongTensor(params['fill'][b]) if use_cuda: blank_b = blank_b.cuda() fill_b =", "else: return self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words, tgt_emb, memory_bank, state,", "ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss']", "keepdim=True) log_prob = torch.log(tgt_prob + 1e-20) elif choice == 'sample':", "1:].contiguous() if self._copy: # copy_score: batch_size, tgt_len, src_len _, copy_score,", "attn_copy = f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy, src_map) scores", "not None: mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask,", "3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1] return memory_bank, layer_outputs class", "if self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len,", "word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep,", "enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \\ else params['memory_bank'] # +1", "tgt_len - 1 x vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1))", "self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings", "char_rep), 2) # B x P x d+f word_rep =", "self.transformer.init_state(src_lens, max_src_len) def decode(self, tgt_words, tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None):", "ml_loss.sum(1) * kwargs['example_weights'] loss['ml_loss'] = ml_loss.mean() loss['loss_per_token'] = ml_loss.div((summ_len -", "tgt_words = torch.LongTensor([constants.BOS]) if use_cuda: tgt_words = 
tgt_words.cuda() tgt_words =", "parameters in self.named_parameters(): if parameters.requires_grad: table.add_row([name, str(list(parameters.shape)), parameters.numel()]) return table", "1, bias=False) def count_parameters(self): return self.transformer.count_parameters() def forward(self, input, input_len):", "code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): \"\"\"", "import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc import sequence_mask class Embedder(nn.Module): def", "copy_info else None # attentions: batch_size x tgt_len x num_heads", "= torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc =", "x vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1])", "target sequence summ_emb = self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len,", "w in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for", "if self._copy: _, copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False)", "# mask copy_attn weights here if needed if kwargs['code_mask_rep'] is", "args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos = all(v == 0 for", "decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score, _ =", "for <EOS> token for idx in range(params['max_len'] + 1): tgt", "[params['tgt_dict'].word_to_char_ids(w).tolist() for w in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words =", "t, tgt_dict, src_vocabs): words = [] for idx, w in", "**kwargs) else: return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs)", "self.use_src_word: word_rep = 
self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d", "layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x h", "copy_info, dec_log_probs def decode(self, code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment,", "To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid =", "Shape\"] = \"r\" table.align[\"Param #\"] = \"r\" for name, parameters", "torch.nn.functional as f from prettytable import PrettyTable from c2nl.modules.char_embedding import", "step=None): if mode == 'encoder': word_rep = None if self.use_src_word:", "def decode(self, code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs): word_rep", "count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self): table = PrettyTable() table.field_names =", "tgt_seq, src_map, alignment, **kwargs): \"\"\" Input: - code_word_rep: ``(batch_size, max_doc_len)``", "self.split_decoder: copier_out, attns = self.transformer_c(tgt_words, tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage)", "self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f", "= f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank", "d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop ) # To accomplish", "= ml_loss.div((summ_len - 1).float()).mean() return loss def forward(self, code_word_rep, code_char_rep,", "self.input_size = input_size self.split_decoder = args.split_decoder and args.copy_attn if self.split_decoder:", "+ 1): tgt = self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask =", "\\ else params['memory_bank'] # +1 for <EOS> token for idx", "x P x d if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) #", "num_heads x src_len attentions = 
torch.stack(attentions, dim=1) if attentions else", "if self._copy: mask = tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words =", "code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment,", "layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'],", "word_rep + type_rep if self.src_pos_emb and self.no_relative_pos: pos_enc = torch.arange(start=0,", "= self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d", "2, self.input_size), nn.ReLU() ) else: self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size,", "self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1, bias=False)", "= args.tgt_pos_emb self.no_relative_pos = all(v == 0 for v in", "else: scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size`", "args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size, num_layers=2) if", "code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs): word_rep = self.embedder(code_word_rep,", "import torch.nn.functional as f from prettytable import PrettyTable from c2nl.modules.char_embedding", "'encoder': word_rep = None if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) #", "self.tgt_highway_net(word_rep) # B x P x d+f if self.tgt_pos_emb: if", "dec_out], dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1) decoder_outputs =", "= CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep,", "x tgt_len x vocab_size` 
scores = scores[:, :-1, :].contiguous() #", "force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self, code_word_rep, code_char_rep, code_type_rep,", "args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters))) self.src_highway_net = Highway(self.enc_input_size,", "dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy, src_map) scores = scores[:, :-1,", "range(prediction.size(0)): if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b = torch.LongTensor(params['fill'][b]) if", "1 args.max_relative_pos = args.max_relative_pos * args.nlayers self.embedder = Embedder(args) self.encoder", "1 tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) tgt_chars", "if self.src_pos_emb and self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc =", "tgt_emb, memory_bank, state[0], step=step, layer_wise_coverage=layer_wise_coverage) dec_out, _ = self.transformer_d(tgt_words, tgt_emb,", "else memory_bank layer_wise_dec_out, attns = self.decoder(enc_outputs, code_len, summ_pad_mask, summ_emb) decoder_outputs", "words = torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return dec_preds, attentions, copy_info,", "== 'greedy': tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True) log_prob =", "args.emsize == self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn if", "time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep", "= \"r\" for name, parameters in self.named_parameters(): if parameters.requires_grad: table.add_row([name,", "accomplish eq. 
19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential(", "x seq_len x h if self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2)", "= nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid() ) self.fusion_gate =", "b in range(prediction.size(0)): if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b]) fill_b =", "self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator = CopyGenerator(self.decoder.input_size, tgt_dict, self.generator)", "[] max_mem_len = params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1]", "0 # at least one of word or char embedding", "d+f word_rep = self.src_highway_net(word_rep) # B x P x d+f", "args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if", "memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x", "``(batch_size)`` - tgt_seq: ``(batch_size, max_len)`` Output: - ``(batch_size, P_LEN)``, ``(batch_size,", "layer_wise_coverage=layer_wise_coverage) return decoder_outputs, attns def forward(self, memory_bank, memory_len, tgt_pad_mask, tgt_emb):", "d+f if self.tgt_pos_emb: if step is None: pos_enc = torch.arange(start=0,", "= self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction = prediction.squeeze(1) for b in", "word_rep = self.dropout(word_rep) return word_rep class Encoder(nn.Module): def __init__(self, args,", "self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none') def", "def __init__(self, args, input_size): super(Encoder, self).__init__() self.transformer = TransformerEncoder(num_layers=args.nlayers, d_model=input_size,", "self.copy_generator(decoder_outputs, attn_copy, 
params['src_map']) prediction = prediction.squeeze(1) for b in range(prediction.size(0)):", "pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep else:", "= kwargs['max_len'] params['src_words'] = code_word_rep dec_preds, attentions, copy_info, _ =", "self.tgt_pos_emb: if step is None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else:", "self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn,", "else: return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs) def", "None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) else: pos_enc = torch.LongTensor([step]) #", "+ pos_rep else: raise ValueError('Unknown embedder mode!') word_rep = self.dropout(word_rep)", "def forward(self, code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq,", "self.copy_attn(decoder_outputs, memory_bank, memory_lengths=code_len, softmax_weights=False) # mask copy_attn weights here if", "= layer_wise_dec_out[-1] loss = dict() target = tgt_seq[:, 1:].contiguous() if", "= torch.log(tgt_prob + 1e-20) elif choice == 'sample': tgt, log_prob", "and encode the source sequence code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep,", "+ 1e-20) elif choice == 'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1))", "and args.copy_attn if self.split_decoder: # Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder", "as f from prettytable import PrettyTable from c2nl.modules.char_embedding import CharEmbedding", "class Embedder(nn.Module): def __init__(self, args): super(Embedder, self).__init__() self.enc_input_size = 0", "self.criterion(scores, alignment[:, 1:].contiguous(), target) else: scores = self.generator(decoder_outputs) # `batch", 
"self.dec_input_size = 0 # at least one of word or", "return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class Transformer(nn.Module): \"\"\"Module that writes", "tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage'])", "pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep", "args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights = nn.Linear(input_size, 1, bias=False) def count_parameters(self):", "dec_log_probs def decode(self, code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs):", "max_que_len, max_word_len)`` - summ_len: ``(batch_size)`` - tgt_seq: ``(batch_size, max_len)`` Output:", "return dec_preds, attentions, copy_info, dec_log_probs def decode(self, code_word_rep, code_char_rep, code_type_rep,", "mode!') word_rep = self.dropout(word_rep) return word_rep class Encoder(nn.Module): def __init__(self,", "nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2, self.dec_input_size)", "P x d+f word_rep = self.src_highway_net(word_rep) # B x P", "B x seq_len x h # embed and encode the", "nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid() ) self.fusion_gate = nn.Sequential( nn.Linear(self.input_size", "a passage.\"\"\" def __init__(self, args, tgt_dict): \"\"\"\"Constructor of the class.\"\"\"", "**kwargs): word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs =", "PrettyTable from c2nl.modules.char_embedding import CharEmbedding from c2nl.modules.embeddings import Embeddings from", "1 x vocab_size` ml_loss = self.criterion(scores.view(-1, scores.size(2)), target.view(-1)) ml_loss =", "mode == 'encoder': word_rep = None if self.use_src_word: word_rep =", "+ 
self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len): if", "acc_dec_outs = [] max_mem_len = params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list)", "else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\": None}", "words.append(tgt_dict[widx]) else: widx = widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return words", "self.use_src_word = args.use_src_word self.use_tgt_word = args.use_tgt_word if self.use_src_word: self.src_word_embeddings =", "if isinstance(memory_bank, list) else memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len) return", "return words def __generate_sequence(self, params, choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0)", "tgt_seq, src_map, alignment, **kwargs) else: return self.decode(code_word_rep, code_char_rep, code_type_rep, code_len,", "dropout=args.trans_drop ) # To accomplish eq. 
19 - 21 from", "params, choice='greedy', tgt_words=None): batch_size = params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if", "dec_out)], dim=-1) decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs, attns = self.transformer(tgt_words,", "= args.use_src_word self.use_tgt_word = args.use_tgt_word if self.use_src_word: self.src_word_embeddings = Embeddings(args.emsize,", "tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words] tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1)", "self.transformer_d.init_state(src_lens, max_src_len) return state_c, state_d else: return self.transformer.init_state(src_lens, max_src_len) def", "# used in inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda:", "= [] copy_info = [] attentions = [] dec_log_probs =", "at least one of word or char embedding options should", "word_rep = word_rep + pos_rep elif mode == 'decoder': word_rep", "acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'],", "= self.criterion(scores, alignment[:, 1:].contiguous(), target) else: scores = self.generator(decoder_outputs) #", "= nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize ==", "target.view(-1)) ml_loss = ml_loss.view(*scores.size()[:-1]) ml_loss = ml_loss.mul(target.ne(constants.PAD).float()) ml_loss = ml_loss.sum(1)", "memory_lengths=params['src_len'], softmax_weights=False) # mask copy_attn weights here if needed if", "the class.\"\"\" super(Transformer, self).__init__() self.name = 'Transformer' if len(args.max_relative_pos) !=", "scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size` scores", "step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD) layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs,", "= 
[] for idx, w in enumerate(t): widx = w[0].item()", "args.nfilters) self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2) self.use_type", "self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep = word_rep + type_rep if", "\"r\" table.align[\"Param #\"] = \"r\" for name, parameters in self.named_parameters():", "if self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy", "src_map, alignment, **kwargs): \"\"\" Input: - code_word_rep: ``(batch_size, max_doc_len)`` -", "of word or char embedding options should be True assert", "self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) self.dropout = nn.Dropout(args.dropout_emb)", "params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if tgt_words is None: tgt_words =", "**kwargs): batch_size = code_len.size(0) # embed and encode the source", "if use_cuda: tgt_words = tgt_words.cuda() tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B", "= self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len)", "None: word_rep = char_rep else: word_rep = torch.cat((word_rep, char_rep), 2)", "layer_scores = f.softmax(layer_scores, dim=-1) memory_bank = torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else:", "= kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable. 
copy_score.data.masked_fill_(mask, -float('inf')) attn_copy =", "= self.generator(decoder_outputs) # `batch x tgt_len x vocab_size` scores =", "= {\"coverage\": None} enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \\ else", "char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f if", "== 1 args.max_relative_pos = args.max_relative_pos * args.nlayers self.embedder = Embedder(args)", "= char_rep else: word_rep = torch.cat((word_rep, char_rep), 2) # B", "state, step=None, layer_wise_coverage=None): if self.split_decoder: copier_out, attns = self.transformer_c(tgt_words, tgt_emb,", "copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy,", "def __init__(self, args, tgt_dict): \"\"\"\"Constructor of the class.\"\"\" super(Transformer, self).__init__()", "words] words = torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return dec_preds, attentions,", "self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d if self.use_src_char: char_rep", "max_src_len) return state_c, state_d else: return self.transformer.init_state(src_lens, max_src_len) def decode(self,", "name, parameters in self.named_parameters(): if parameters.requires_grad: table.add_row([name, str(list(parameters.shape)), parameters.numel()]) return", "decoder_outputs, attns = self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return", "= None if self.use_tgt_word: word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x", "Decoder(nn.Module): def __init__(self, args, input_size): super(Decoder, self).__init__() self.input_size = input_size", "tgt_emb, memory_bank, state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input", "1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info = []", "= nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) 
self.dropout = nn.Dropout(args.dropout_emb) def forward(self,", "args.src_vocab_size, constants.PAD) self.enc_input_size += args.emsize if self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize,", "self._copy: _, copy_score, _ = self.copy_attn(decoder_outputs, params['memory_bank'], memory_lengths=params['src_len'], softmax_weights=False) #", "word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d if", "d if self.use_tgt_char: char_rep = self.tgt_char_embeddings(sequence_char) # B x P", "x src_len attentions = torch.stack(attentions, dim=1) if attentions else None", "else None return { 'predictions': dec_preds, 'copy_info': copy_info, 'memory_bank': memory_bank,", "self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none') def _run_forward_ml(self,", "Shape\", \"Param #\"] table.align[\"Layer Name\"] = \"l\" table.align[\"Output Shape\"] =", "copy_info, _ = self.__generate_sequence(params, choice='greedy') dec_preds = torch.stack(dec_preds, dim=1) copy_info", "= Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size += args.emsize self.use_src_char = args.use_src_char", "prediction = prediction.squeeze(1) for b in range(prediction.size(0)): if params['blank'][b]: blank_b", "torch.Tensor(tgt_chars).to(tgt).unsqueeze(1) words = [params['tgt_dict'][w] for w in words] words =", "from `https://arxiv.org/pdf/1808.07913.pdf` self.fusion_sigmoid = nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.Sigmoid()", "prediction.squeeze(1) for b in range(prediction.size(0)): if params['blank'][b]: blank_b = torch.LongTensor(params['blank'][b])", "max_src_len): if self.split_decoder: state_c = self.transformer_c.init_state(src_lens, max_src_len) state_d = self.transformer_d.init_state(src_lens,", "= tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info", 
"code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs): word_rep = self.embedder(code_word_rep, code_char_rep,", "args.emsize if self.use_tgt_word: self.tgt_word_embeddings = Embeddings(args.emsize, args.tgt_vocab_size, constants.PAD) self.dec_input_size +=", "Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score, dim=-1) prediction", "- tgt_seq: ``(batch_size, max_len)`` Output: - ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)``", "self.enc_input_size) self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb = args.tgt_pos_emb self.no_relative_pos = all(v", "code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep,", "B x P x d+f word_rep = self.src_highway_net(word_rep) # B", "memory_bank, 'attentions': attentions } def count_parameters(self): return sum(p.numel() for p", "self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) #", "word_rep class Encoder(nn.Module): def __init__(self, args, input_size): super(Encoder, self).__init__() self.transformer", "torch.matmul(output.transpose(2, 3), layer_scores.unsqueeze(3)).squeeze(3) else: memory_bank = layer_outputs[-1] return memory_bank, layer_outputs", "batch_size x tgt_len x num_heads x src_len attentions = torch.stack(attentions,", "ValueError('Unknown embedder mode!') word_rep = self.dropout(word_rep) return word_rep class Encoder(nn.Module):", "if self.use_all_enc_layers: output = torch.stack(layer_outputs, dim=2) # B x seq_len", "self.training: return self._run_forward_ml(code_word_rep, code_char_rep, code_type_rep, code_len, summ_word_rep, summ_char_rep, summ_len, tgt_seq,", "summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): batch_size = code_len.size(0)", "== 0 for v in args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos:", 
"tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True) log_prob = torch.log(tgt_prob +", "# B x P x f if word_rep is None:", "= self.copy_generator(decoder_outputs, attn_copy, src_map) scores = scores[:, :-1, :].contiguous() ml_loss", "scores[:, :-1, :].contiguous() # `batch x tgt_len - 1 x", "step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs = layer_wise_dec_out[-1] acc_dec_outs.append(decoder_outputs.squeeze(1)) if self._copy: _, copy_score,", "nn import torch.nn.functional as f from prettytable import PrettyTable from", "if kwargs['code_mask_rep'] is not None: mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make", "self.transformer(input, input_len) # B x seq_len x h if self.use_all_enc_layers:", "assert args.emsize == self.decoder.input_size self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight self._copy = args.copy_attn", "code_word_rep: ``(batch_size, max_doc_len)`` - code_char_rep: ``(batch_size, max_doc_len, max_word_len)`` - code_len:", "params['memory_bank'][0].shape[1] \\ if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1] dec_states = self.decoder.init_decoder(params['src_len'],", "d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers: self.layer_weights", "pos_enc = pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep = word_rep +", "tgt_seq[:, 1:].contiguous() if self._copy: # copy_score: batch_size, tgt_len, src_len _,", "[] copy_info = [] attentions = [] dec_log_probs = []", "= self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab']) tgt_chars = None if self.embedder.use_tgt_char: tgt_chars", "dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask = tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1))", "= self.src_char_embeddings(sequence_char) # B x P x f if word_rep", 
"max_len)`` Output: - ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)`` \"\"\" if self.training:", "fill_b = fill_b.cuda() prediction[b].index_add_(0, fill_b, prediction[b].index_select(0, blank_b)) prediction[b].index_fill_(0, blank_b, 1e-10)", "params['src_mask'] = kwargs['code_mask_rep'] params['fill'] = kwargs['fill'] params['blank'] = kwargs['blank'] params['src_dict']", "= pos_enc.cuda() pos_rep = self.tgt_pos_embeddings(pos_enc) word_rep = word_rep + pos_rep", "f.softmax(prediction, dim=1) if choice == 'greedy': tgt_prob, tgt = torch.max(prediction,", "args, tgt_dict): \"\"\"\"Constructor of the class.\"\"\" super(Transformer, self).__init__() self.name =", "src_map, alignment, **kwargs) def __tens2sent(self, t, tgt_dict, src_vocabs): words =", "kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable. copy_score.data.masked_fill_(mask, -float('inf')) attn_copy = f.softmax(copy_score,", "is None: tgt_words = torch.LongTensor([constants.BOS]) if use_cuda: tgt_words = tgt_words.cuda()", "kwargs['tgt_dict'] params['max_len'] = kwargs['max_len'] params['src_words'] = code_word_rep dec_preds, attentions, copy_info,", "word_rep + pos_rep elif mode == 'decoder': word_rep = None", "decoder_outputs = self.fusion_gate(gate_input) else: decoder_outputs, attns = self.transformer(tgt_words, tgt_emb, memory_bank,", "c2nl.modules.global_attention import GlobalAttention from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion from c2nl.utils.misc", "code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B", "!= args.nlayers: assert len(args.max_relative_pos) == 1 args.max_relative_pos = args.max_relative_pos *", "x h layer_scores = self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1) memory_bank", "args.tgt_pos_emb self.no_relative_pos = all(v == 0 for v in args.max_relative_pos)", "tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD) 
tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1)", "args.max_relative_pos * args.nlayers self.embedder = Embedder(args) self.encoder = Encoder(args, self.embedder.enc_input_size)", "d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, coverage_attn=args.coverage_attn, dropout=args.trans_drop ) if args.reload_decoder_state: state_dict =", "seq_len x h params = dict() params['memory_bank'] = memory_bank params['layer_wise_outputs']", "dropout=args.trans_drop ) if args.reload_decoder_state: state_dict = torch.load( args.reload_decoder_state, map_location=lambda storage,", "def count_decoder_parameters(self): return self.decoder.count_parameters() def layer_wise_parameters(self): table = PrettyTable() table.field_names", "count_parameters(self): if self.split_decoder: return self.transformer_c.count_parameters() + self.transformer_d.count_parameters() else: return self.transformer.count_parameters()", "len(args.max_relative_pos) != args.nlayers: assert len(args.max_relative_pos) == 1 args.max_relative_pos = args.max_relative_pos", "src_len std_attn = torch.stack(attns[\"std\"], dim=1) attentions.append(std_attn.squeeze(2)) if self._copy: mask =", "= kwargs['source_vocab'] params['src_map'] = src_map params['src_mask'] = kwargs['code_mask_rep'] params['fill'] =", "max_src_len) def decode(self, tgt_words, tgt_emb, memory_bank, state, step=None, layer_wise_coverage=None): if", "tgt_dict): \"\"\"\"Constructor of the class.\"\"\" super(Transformer, self).__init__() self.name = 'Transformer'", "batch_size = params['memory_bank'].size(0) use_cuda = params['memory_bank'].is_cuda if tgt_words is None:", "x P x d+f if self.tgt_pos_emb: if step is None:", "from c2nl.decoders.transformer import TransformerDecoder from c2nl.inputters import constants from c2nl.modules.global_attention", "src_map, alignment, **kwargs): batch_size = code_len.size(0) # embed and encode", "P x f if word_rep is None: word_rep = char_rep", 
"torch.Tensor(tgt_chars.tolist()).unsqueeze(0) tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds =", "dim=1) copy_info = torch.stack(copy_info, dim=1) if copy_info else None #", "tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if", ") else: self.transformer = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v,", ":-1, :].contiguous() # `batch x tgt_len - 1 x vocab_size`", "prediction = self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction, dim=1) if choice ==", "d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop, max_relative_positions=args.max_relative_pos, use_neg_dist=args.use_neg_dist) self.use_all_enc_layers = args.use_all_enc_layers if self.use_all_enc_layers:", "memory_bank, state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input =", "self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len, self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len", "x 1 tgt_chars = None if self.embedder.use_tgt_char: tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD)", "self.decoder.init_decoder(params['src_len'], max_mem_len) attns = {\"coverage\": None} enc_outputs = params['layer_wise_outputs'] if", "else params['memory_bank'] # +1 for <EOS> token for idx in", "= self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1)) gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1)", "-float('inf')) attn_copy = f.softmax(copy_score, dim=-1) scores = self.copy_generator(decoder_outputs, attn_copy, src_map)", "h # embed and encode the target sequence summ_emb =", "least one of word or char embedding options should be", "if self.use_src_char: char_rep = 
self.src_char_embeddings(sequence_char) # B x P x", "= torch.LongTensor(params['fill'][b]) if use_cuda: blank_b = blank_b.cuda() fill_b = fill_b.cuda()", "+= args.emsize self.use_src_char = args.use_src_char self.use_tgt_char = args.use_tgt_char if self.use_src_char:", "blank_b, 1e-10) else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction, dim=1)", "pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc = pos_enc.cuda() pos_rep =", "self.use_tgt_char: assert len(args.filter_size) == len(args.nfilters) self.tgt_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size,", "attentions, copy_info, dec_log_probs def decode(self, code_word_rep, code_char_rep, code_type_rep, code_len, src_map,", "code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs) def __tens2sent(self, t, tgt_dict,", "all(v == 0 for v in args.max_relative_pos) if self.src_pos_emb and", "= ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank", "if p.requires_grad) def count_encoder_parameters(self): return self.encoder.count_parameters() def count_decoder_parameters(self): return self.decoder.count_parameters()", "self.enc_input_size) if self.tgt_pos_emb: self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2, self.dec_input_size) self.dropout", "options should be True assert args.use_src_word or args.use_src_char assert args.use_tgt_word", "idx, w in enumerate(t): widx = w[0].item() if widx <", "summ_word_rep, summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs) else: return self.decode(code_word_rep,", "if self.tgt_pos_emb: if step is None: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor)", "self.decoder.count_parameters() def layer_wise_parameters(self): table = PrettyTable() table.field_names = [\"Layer Name\",", "args.use_tgt_char if self.use_src_char: assert 
len(args.filter_size) == len(args.nfilters) self.src_char_embeddings = CharEmbedding(args.n_characters,", "self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len): if self.split_decoder: state_c = self.transformer_c.init_state(src_lens,", "args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word: assert args.emsize == self.decoder.input_size self.generator.weight", "tgt_dict, self.generator) self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict), force_copy=args.force_copy) else: self.criterion = nn.CrossEntropyLoss(reduction='none')", "= self.src_highway_net(word_rep) # B x P x d+f if self.use_type:", "self.decode(code_word_rep, code_char_rep, code_type_rep, code_len, src_map, alignment, **kwargs) def __tens2sent(self, t,", "attn_copy = f.softmax(copy_score, dim=-1) prediction = self.copy_generator(decoder_outputs, attn_copy, params['src_map']) prediction", "= Encoder(args, self.embedder.enc_input_size) self.decoder = Decoder(args, self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn", "def __tens2sent(self, t, tgt_dict, src_vocabs): words = [] for idx,", "else: return self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len): if self.split_decoder: state_c", "# `batch x tgt_len x vocab_size` scores = scores[:, :-1,", "args.max_relative_pos = args.max_relative_pos * args.nlayers self.embedder = Embedder(args) self.encoder =", "mask = tgt.gt(len(params['tgt_dict']) - 1) copy_info.append(mask.float().squeeze(1)) words = self.__tens2sent(tgt, params['tgt_dict'],", "= self.encoder(code_rep, code_len) # B x seq_len x h #", "- len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self, params, choice='greedy', tgt_words=None):", "word_rep = word_rep + type_rep if self.src_pos_emb and self.no_relative_pos: pos_enc", "= self.dropout(word_rep) return word_rep class Encoder(nn.Module): def __init__(self, args, input_size):", "w in 
words] words = torch.Tensor(words).type_as(tgt) tgt_words = words.unsqueeze(1) return", "v in args.max_relative_pos) if self.src_pos_emb and self.no_relative_pos: self.src_pos_embeddings = nn.Embedding(args.max_src_len,", "self.reinforce.sample(prediction.unsqueeze(1)) else: assert False dec_log_probs.append(log_prob.squeeze(1)) dec_preds.append(tgt.squeeze(1).clone()) if \"std\" in attns:", "coverage_attn=args.coverage_attn, dropout=args.trans_drop ) self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k,", "x d if self.use_src_char: char_rep = self.src_char_embeddings(sequence_char) # B x", "layer_outputs class Decoder(nn.Module): def __init__(self, args, input_size): super(Decoder, self).__init__() self.input_size", "scores[:, :-1, :].contiguous() ml_loss = self.criterion(scores, alignment[:, 1:].contiguous(), target) else:", "args.use_src_char self.use_tgt_char = args.use_tgt_char if self.use_src_char: assert len(args.filter_size) == len(args.nfilters)", "tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = [] copy_info = [] attentions", "+ type_rep if self.src_pos_emb and self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor)", "torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info, dim=1) if copy_info else None", "else None # attentions: batch_size x tgt_len x num_heads x", "self.use_src_char: char_rep = self.src_char_embeddings(sequence_char) # B x P x f", "nlayers x h layer_scores = self.layer_weights(output).squeeze(3) layer_scores = f.softmax(layer_scores, dim=-1)", "self.embedder.dec_input_size) self.layer_wise_attn = args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings:", "type_rep if self.src_pos_emb and self.no_relative_pos: pos_enc = torch.arange(start=0, end=word_rep.size(1)).type(torch.LongTensor) pos_enc", "self.use_tgt_word: word_rep = 
self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d", "= torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info, dim=1) if copy_info else", "def forward(self, sequence, sequence_char, sequence_type=None, mode='encoder', step=None): if mode ==", "self._copy = args.copy_attn if self._copy: self.copy_attn = GlobalAttention(dim=self.decoder.input_size, attn_type=args.attn_type) self.copy_generator", "code_len: ``(batch_size)`` - summ_word_rep: ``(batch_size, max_que_len)`` - summ_char_rep: ``(batch_size, max_que_len,", "prediction[b].index_fill_(0, blank_b, 1e-10) else: prediction = self.generator(decoder_outputs.squeeze(1)) prediction = f.softmax(prediction,", "self.dec_input_size += sum(list(map(int, args.nfilters))) self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2) self.use_type =", "P x d if self.use_src_char: char_rep = self.src_char_embeddings(sequence_char) # B", "layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask, tgt, enc_outputs, dec_states, step=idx, layer_wise_coverage=attns['coverage']) decoder_outputs", "= torch.stack(attentions, dim=1) if attentions else None return { 'predictions':", "= args.layer_wise_attn self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size) if args.share_decoder_embeddings: if self.embedder.use_tgt_word:", "self.transformer_d = TransformerDecoder( num_layers=args.nlayers, d_model=self.input_size, heads=args.num_head, d_k=args.d_k, d_v=args.d_v, d_ff=args.d_ff, dropout=args.trans_drop", "summ_char_rep, summ_len, tgt_seq, src_map, alignment, **kwargs): \"\"\" Input: - code_word_rep:", "code_len) # B x seq_len x h params = dict()", "== 'encoder': word_rep = None if self.use_src_word: word_rep = self.src_word_embeddings(sequence.unsqueeze(2))", "widx = widx - len(tgt_dict) words.append(src_vocabs[idx][widx]) return words def __generate_sequence(self,", "self.transformer(tgt_words, tgt_emb, memory_bank, state, step=step, layer_wise_coverage=layer_wise_coverage) return 
decoder_outputs, attns def", "sequence code_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep, mode='encoder') memory_bank, layer_wise_outputs =", "decoder_outputs = layer_wise_dec_out[-1] loss = dict() target = tgt_seq[:, 1:].contiguous()", "attns: # std_attn: batch_size x num_heads x 1 x src_len", "B x P x d+f if self.tgt_pos_emb: if step is", "self.embedder(summ_word_rep, summ_char_rep, mode='decoder') summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1)) enc_outputs = layer_wise_outputs", "code_type_rep, code_len, src_map, alignment, **kwargs): word_rep = self.embedder(code_word_rep, code_char_rep, code_type_rep,", "B x P x d+f if self.use_type: type_rep = self.type_embeddings(sequence_type)", "choice='greedy') dec_preds = torch.stack(dec_preds, dim=1) copy_info = torch.stack(copy_info, dim=1) if", "self.src_char_embeddings = CharEmbedding(args.n_characters, args.char_emsize, args.filter_size, args.nfilters) self.enc_input_size += sum(list(map(int, args.nfilters)))", "= nn.Sequential( nn.Linear(self.input_size * 2, self.input_size), nn.ReLU() ) else: self.transformer", "# std_attn: batch_size x num_heads x 1 x src_len std_attn", "# B x P x d if self.use_tgt_char: char_rep =", "memory_bank, memory_len, tgt_pad_mask, tgt_emb): max_mem_len = memory_bank[0].shape[1] \\ if isinstance(memory_bank,", "bias=False) def count_parameters(self): return self.transformer.count_parameters() def forward(self, input, input_len): layer_outputs,", "tgt_chars = tgt_chars.repeat(batch_size, 1) tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1) dec_preds = []", "isinstance(memory_bank, list) else memory_bank.shape[1] state = self.init_decoder(memory_len, max_mem_len) return self.decode(tgt_pad_mask,", "1:].contiguous(), target) else: scores = self.generator(decoder_outputs) # `batch x tgt_len", "word_rep + pos_rep else: raise ValueError('Unknown embedder mode!') word_rep =", "state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], 
dim=-1)) gate_input = torch.cat([copier_out,", "1e-20) elif choice == 'sample': tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1)) else:", "x P x d+f if self.use_type: type_rep = self.type_embeddings(sequence_type) word_rep", "1): tgt = self.embedder(tgt_words, tgt_chars, mode='decoder', step=idx) tgt_pad_mask = tgt_words.data.eq(constants.PAD)", "_ = self.transformer_d(tgt_words, tgt_emb, memory_bank, state[1], step=step) f_t = self.fusion_sigmoid(torch.cat([copier_out,", "max_mem_len) return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state) class Transformer(nn.Module): \"\"\"Module that", "torch import torch.nn as nn import torch.nn.functional as f from", "count_parameters(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) def", "if self.use_type: self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP), self.enc_input_size) self.src_pos_emb = args.src_pos_emb self.tgt_pos_emb", "code_len, src_map, alignment, **kwargs) def __tens2sent(self, t, tgt_dict, src_vocabs): words", "passage.\"\"\" def __init__(self, args, tgt_dict): \"\"\"\"Constructor of the class.\"\"\" super(Transformer,", "used in inference time pos_enc = pos_enc.expand(*word_rep.size()[:-1]) if word_rep.is_cuda: pos_enc", "self.transformer_d.count_parameters() else: return self.transformer.count_parameters() def init_decoder(self, src_lens, max_src_len): if self.split_decoder:" ]
[ "env['CATTLE_CONFIG_URL'] = Config.config_url() for i in range(3): p = popen(cmd,", "or \\ instanceData.get('token') is None: return container = self.compute.get_container(docker_client(), instanceData,", "0: break # Sleep and try again if missing time.sleep(1)", "retcode, output, None text = [] for line in output.splitlines():", "exit_code, output, data = ns_exec(inspect['State']['Pid'], event) if exit_code == 0:", "from cattle.agent.handler import BaseHandler from cattle.progress import Progress from cattle.type_manager", "if not len(line): continue kv = line.split('=', 1) if kv[0].startswith('CATTLE'):", "''.join(text), data class DockerDelegate(BaseHandler): def __init__(self): self.compute = DockerCompute() pass", "retcode, ''.join(text), data class DockerDelegate(BaseHandler): def __init__(self): self.compute = DockerCompute()", "def events(self): return ['delegate.request'] def delegate_request(self, req=None, event=None, instanceData=None, **kw):", "running: log.error('Can not call [%s], container is not running', instanceData.uuid)", "container is not running', instanceData.uuid) return except KeyError: log.error('Can not", "return reply(event, data, parent=req) else: progress.update('Update failed', data={ 'exitCode': exit_code,", "popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error = p.communicate(input=input) retcode", "not call [%s], container is not running', instanceData.uuid) return progress", "'-p', '-t', str(pid), '--', script] marshaller = get_type(MARSHALLER) input =", "self.compute.get_container(docker_client(), instanceData, by_agent=True) if container is None: log.info('Can not call", "instanceData, by_agent=True) if container is None: log.info('Can not call [%s],", "retcode == 0: break exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e',", "is None: return container = self.compute.get_container(docker_client(), instanceData, by_agent=True) if container", "if popen(exists_cmd, 
env=env).wait() == 0: break # Sleep and try", "text = [] for line in output.splitlines(): if line.startswith('{'): data", "if missing time.sleep(1) if retcode: return retcode, output, None text", "0: return reply(event, data, parent=req) else: progress.update('Update failed', data={ 'exitCode':", "exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script] if popen(exists_cmd, env=env).wait()", "for line in f.read().split('\\0'): if not len(line): continue kv =", "= os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd = ['nsenter', '-F', '-m', '-u',", "parent=req) exit_code, output, data = ns_exec(inspect['State']['Pid'], event) if exit_code ==", "retcode = p.poll() if retcode == 0: break exists_cmd =", "p = popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error =", "MARSHALLER from . import docker_client import subprocess import os import", ". import docker_client import subprocess import os import time log", "['delegate.request'] def delegate_request(self, req=None, event=None, instanceData=None, **kw): if instanceData.kind !=", "'-t', str(pid), '--', script] marshaller = get_type(MARSHALLER) input = marshaller.to_string(event)", "instanceData.kind != 'container' or \\ instanceData.get('token') is None: return container", "event): script = os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd = ['nsenter', '-F',", "cmd = ['nsenter', '-F', '-m', '-u', '-i', '-n', '-p', '-t',", "and try again if missing time.sleep(1) if retcode: return retcode,", "None text = [] for line in output.splitlines(): if line.startswith('{'):", "[%s], container is not running', instanceData.uuid) return progress = Progress(event,", "running', instanceData.uuid) return except KeyError: log.error('Can not call [%s], container", "running', instanceData.uuid) return progress = Progress(event, parent=req) exit_code, output, data", "container is not running', instanceData.uuid) return progress = Progress(event, 
parent=req)", "data, parent=req) else: progress.update('Update failed', data={ 'exitCode': exit_code, 'output': output", "line.startswith('{'): data = marshaller.from_string(line) break text.append(line) return retcode, ''.join(text), data", "env=env).wait() == 0: break # Sleep and try again if", "not running', instanceData.uuid) return progress = Progress(event, parent=req) exit_code, output,", "as f: for line in f.read().split('\\0'): if not len(line): continue", "if retcode == 0: break exists_cmd = cmd[:-1] + ['/usr/bin/test',", "ns_exec(inspect['State']['Pid'], event) if exit_code == 0: return reply(event, data, parent=req)", "progress = Progress(event, parent=req) exit_code, output, data = ns_exec(inspect['State']['Pid'], event)", "from cattle.progress import Progress from cattle.type_manager import get_type, MARSHALLER from", "!= 'container' or \\ instanceData.get('token') is None: return container =", "if container is None: log.info('Can not call [%s], container does", "data = None env = {} with open('/proc/{}/environ'.format(pid)) as f:", "env = {} with open('/proc/{}/environ'.format(pid)) as f: for line in", "text.append(line) return retcode, ''.join(text), data class DockerDelegate(BaseHandler): def __init__(self): self.compute", "import os import time log = logging.getLogger('docker') def ns_exec(pid, event):", "class DockerDelegate(BaseHandler): def __init__(self): self.compute = DockerCompute() pass def events(self):", "import get_type, MARSHALLER from . 
import docker_client import subprocess import", "inspect = self.compute.inspect(container) try: running = inspect['State']['Running'] if not running:", "env[kv[0]] = kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for", "stderr=subprocess.STDOUT) output, error = p.communicate(input=input) retcode = p.poll() if retcode", "import BaseHandler from cattle.progress import Progress from cattle.type_manager import get_type,", "kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for i in", "import Config from cattle.utils import reply, popen from .compute import", "in f.read().split('\\0'): if not len(line): continue kv = line.split('=', 1)", "output, error = p.communicate(input=input) retcode = p.poll() if retcode ==", "= {} with open('/proc/{}/environ'.format(pid)) as f: for line in f.read().split('\\0'):", "def delegate_request(self, req=None, event=None, instanceData=None, **kw): if instanceData.kind != 'container'", "self.compute.inspect(container) try: running = inspect['State']['Running'] if not running: log.error('Can not", "not exists', instanceData.uuid) return inspect = self.compute.inspect(container) try: running =", "Sleep and try again if missing time.sleep(1) if retcode: return", "KeyError: log.error('Can not call [%s], container is not running', instanceData.uuid)", "if line.startswith('{'): data = marshaller.from_string(line) break text.append(line) return retcode, ''.join(text),", "= marshaller.from_string(line) break text.append(line) return retcode, ''.join(text), data class DockerDelegate(BaseHandler):", "['nsenter', '-F', '-m', '-u', '-i', '-n', '-p', '-t', str(pid), '--',", "= Config.config_url() for i in range(3): p = popen(cmd, env=env,", "log.info('Can not call [%s], container does not exists', instanceData.uuid) return", ".compute import DockerCompute from cattle.agent.handler import BaseHandler from cattle.progress import", "= DockerCompute() pass def events(self): return 
['delegate.request'] def delegate_request(self, req=None,", "kv = line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH']", "stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error = p.communicate(input=input) retcode = p.poll() if", "= marshaller.to_string(event) data = None env = {} with open('/proc/{}/environ'.format(pid))", "error = p.communicate(input=input) retcode = p.poll() if retcode == 0:", "f: for line in f.read().split('\\0'): if not len(line): continue kv", "script] marshaller = get_type(MARSHALLER) input = marshaller.to_string(event) data = None", "BaseHandler from cattle.progress import Progress from cattle.type_manager import get_type, MARSHALLER", "line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH'] = os.environ['PATH']", "= ['nsenter', '-F', '-m', '-u', '-i', '-n', '-p', '-t', str(pid),", "= p.communicate(input=input) retcode = p.poll() if retcode == 0: break", "Progress(event, parent=req) exit_code, output, data = ns_exec(inspect['State']['Pid'], event) if exit_code", "return progress = Progress(event, parent=req) exit_code, output, data = ns_exec(inspect['State']['Pid'],", "cmd[:-1] + ['/usr/bin/test', '-e', script] if popen(exists_cmd, env=env).wait() == 0:", "try again if missing time.sleep(1) if retcode: return retcode, output,", "missing time.sleep(1) if retcode: return retcode, output, None text =", "output.splitlines(): if line.startswith('{'): data = marshaller.from_string(line) break text.append(line) return retcode,", "kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url()", "popen from .compute import DockerCompute from cattle.agent.handler import BaseHandler from", "input = marshaller.to_string(event) data = None env = {} with", "event) if exit_code == 0: return reply(event, data, parent=req) else:", "again if missing time.sleep(1) if retcode: return retcode, output, None", "cattle.utils import reply, 
popen from .compute import DockerCompute from cattle.agent.handler", "{} with open('/proc/{}/environ'.format(pid)) as f: for line in f.read().split('\\0'): if", "None env = {} with open('/proc/{}/environ'.format(pid)) as f: for line", "return retcode, output, None text = [] for line in", "i in range(3): p = popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)", "time.sleep(1) if retcode: return retcode, output, None text = []", "exit_code == 0: return reply(event, data, parent=req) else: progress.update('Update failed',", "call [%s], container is not running', instanceData.uuid) return except KeyError:", "0: break exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script] if", "events(self): return ['delegate.request'] def delegate_request(self, req=None, event=None, instanceData=None, **kw): if", "container = self.compute.get_container(docker_client(), instanceData, by_agent=True) if container is None: log.info('Can", "instanceData.get('token') is None: return container = self.compute.get_container(docker_client(), instanceData, by_agent=True) if", "reply(event, data, parent=req) else: progress.update('Update failed', data={ 'exitCode': exit_code, 'output':", "'-F', '-m', '-u', '-i', '-n', '-p', '-t', str(pid), '--', script]", "= ns_exec(inspect['State']['Pid'], event) if exit_code == 0: return reply(event, data,", "time log = logging.getLogger('docker') def ns_exec(pid, event): script = os.path.join(Config.home(),", "os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd = ['nsenter', '-F', '-m', '-u', '-i',", "continue kv = line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1]", "break exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script] if popen(exists_cmd,", "= self.compute.inspect(container) try: running = inspect['State']['Running'] if not running: log.error('Can", "if retcode: return retcode, output, None text = [] for", "pass def events(self): return ['delegate.request'] def 
delegate_request(self, req=None, event=None, instanceData=None,", "'-u', '-i', '-n', '-p', '-t', str(pid), '--', script] marshaller =", "None: log.info('Can not call [%s], container does not exists', instanceData.uuid)", "is not running', instanceData.uuid) return except KeyError: log.error('Can not call", "data = ns_exec(inspect['State']['Pid'], event) if exit_code == 0: return reply(event,", "output, data = ns_exec(inspect['State']['Pid'], event) if exit_code == 0: return", "import logging from cattle import Config from cattle.utils import reply,", "['/usr/bin/test', '-e', script] if popen(exists_cmd, env=env).wait() == 0: break #", "DockerDelegate(BaseHandler): def __init__(self): self.compute = DockerCompute() pass def events(self): return", "exists', instanceData.uuid) return inspect = self.compute.inspect(container) try: running = inspect['State']['Running']", "from .compute import DockerCompute from cattle.agent.handler import BaseHandler from cattle.progress", "script] if popen(exists_cmd, env=env).wait() == 0: break # Sleep and", "if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] =", "output, None text = [] for line in output.splitlines(): if", "= [] for line in output.splitlines(): if line.startswith('{'): data =", "= kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for i", "'-i', '-n', '-p', '-t', str(pid), '--', script] marshaller = get_type(MARSHALLER)", "try: running = inspect['State']['Running'] if not running: log.error('Can not call", "None: return container = self.compute.get_container(docker_client(), instanceData, by_agent=True) if container is", "from cattle import Config from cattle.utils import reply, popen from", "break # Sleep and try again if missing time.sleep(1) if", "'-e', script] if popen(exists_cmd, env=env).wait() == 0: break # Sleep", "event.name.split(';')[0]) cmd = ['nsenter', '-F', '-m', '-u', '-i', '-n', '-p',", "instanceData.uuid) 
return inspect = self.compute.inspect(container) try: running = inspect['State']['Running'] if", "= p.poll() if retcode == 0: break exists_cmd = cmd[:-1]", "os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for i in range(3): p =", "instanceData.uuid) return progress = Progress(event, parent=req) exit_code, output, data =", "data class DockerDelegate(BaseHandler): def __init__(self): self.compute = DockerCompute() pass def", "= cmd[:-1] + ['/usr/bin/test', '-e', script] if popen(exists_cmd, env=env).wait() ==", "# Sleep and try again if missing time.sleep(1) if retcode:", "return ['delegate.request'] def delegate_request(self, req=None, event=None, instanceData=None, **kw): if instanceData.kind", "[] for line in output.splitlines(): if line.startswith('{'): data = marshaller.from_string(line)", "'container' or \\ instanceData.get('token') is None: return container = self.compute.get_container(docker_client(),", "line in f.read().split('\\0'): if not len(line): continue kv = line.split('=',", "p.communicate(input=input) retcode = p.poll() if retcode == 0: break exists_cmd", "log = logging.getLogger('docker') def ns_exec(pid, event): script = os.path.join(Config.home(), 'events',", "does not exists', instanceData.uuid) return inspect = self.compute.inspect(container) try: running", "return retcode, ''.join(text), data class DockerDelegate(BaseHandler): def __init__(self): self.compute =", "'-m', '-u', '-i', '-n', '-p', '-t', str(pid), '--', script] marshaller", "for i in range(3): p = popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE,", "range(3): p = popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error", "open('/proc/{}/environ'.format(pid)) as f: for line in f.read().split('\\0'): if not len(line):", "marshaller.to_string(event) data = None env = {} with open('/proc/{}/environ'.format(pid)) as", "env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for i 
in range(3):", "marshaller.from_string(line) break text.append(line) return retcode, ''.join(text), data class DockerDelegate(BaseHandler): def", "not running', instanceData.uuid) return except KeyError: log.error('Can not call [%s],", "= line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH'] =", "= logging.getLogger('docker') def ns_exec(pid, event): script = os.path.join(Config.home(), 'events', event.name.split(';')[0])", "str(pid), '--', script] marshaller = get_type(MARSHALLER) input = marshaller.to_string(event) data", "if not running: log.error('Can not call [%s], container is not", "inspect['State']['Running'] if not running: log.error('Can not call [%s], container is", "with open('/proc/{}/environ'.format(pid)) as f: for line in f.read().split('\\0'): if not", "== 0: return reply(event, data, parent=req) else: progress.update('Update failed', data={", "container does not exists', instanceData.uuid) return inspect = self.compute.inspect(container) try:", "if exit_code == 0: return reply(event, data, parent=req) else: progress.update('Update", "in range(3): p = popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output,", "ns_exec(pid, event): script = os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd = ['nsenter',", "import docker_client import subprocess import os import time log =", "self.compute = DockerCompute() pass def events(self): return ['delegate.request'] def delegate_request(self,", "running = inspect['State']['Running'] if not running: log.error('Can not call [%s],", "for line in output.splitlines(): if line.startswith('{'): data = marshaller.from_string(line) break", "Progress from cattle.type_manager import get_type, MARSHALLER from . import docker_client", "from . 
import docker_client import subprocess import os import time", "return except KeyError: log.error('Can not call [%s], container is not", "len(line): continue kv = line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]] =", "os import time log = logging.getLogger('docker') def ns_exec(pid, event): script", "= None env = {} with open('/proc/{}/environ'.format(pid)) as f: for", "get_type, MARSHALLER from . import docker_client import subprocess import os", "log.error('Can not call [%s], container is not running', instanceData.uuid) return", "is None: log.info('Can not call [%s], container does not exists',", "subprocess import os import time log = logging.getLogger('docker') def ns_exec(pid,", "reply, popen from .compute import DockerCompute from cattle.agent.handler import BaseHandler", "from cattle.utils import reply, popen from .compute import DockerCompute from", "return container = self.compute.get_container(docker_client(), instanceData, by_agent=True) if container is None:", "except KeyError: log.error('Can not call [%s], container is not running',", "**kw): if instanceData.kind != 'container' or \\ instanceData.get('token') is None:", "container is None: log.info('Can not call [%s], container does not", "in output.splitlines(): if line.startswith('{'): data = marshaller.from_string(line) break text.append(line) return", "DockerCompute() pass def events(self): return ['delegate.request'] def delegate_request(self, req=None, event=None,", "not running: log.error('Can not call [%s], container is not running',", "logging from cattle import Config from cattle.utils import reply, popen", "DockerCompute from cattle.agent.handler import BaseHandler from cattle.progress import Progress from", "[%s], container does not exists', instanceData.uuid) return inspect = self.compute.inspect(container)", "def ns_exec(pid, event): script = os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd =", "instanceData=None, **kw): if instanceData.kind != 'container' 
or \\ instanceData.get('token') is", "import reply, popen from .compute import DockerCompute from cattle.agent.handler import", "Config.config_url() for i in range(3): p = popen(cmd, env=env, stdin=subprocess.PIPE,", "import Progress from cattle.type_manager import get_type, MARSHALLER from . import", "req=None, event=None, instanceData=None, **kw): if instanceData.kind != 'container' or \\", "from cattle.type_manager import get_type, MARSHALLER from . import docker_client import", "not call [%s], container is not running', instanceData.uuid) return except", "[%s], container is not running', instanceData.uuid) return except KeyError: log.error('Can", "return inspect = self.compute.inspect(container) try: running = inspect['State']['Running'] if not", "== 0: break exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]", "== 0: break # Sleep and try again if missing", "line in output.splitlines(): if line.startswith('{'): data = marshaller.from_string(line) break text.append(line)", "instanceData.uuid) return except KeyError: log.error('Can not call [%s], container is", "marshaller = get_type(MARSHALLER) input = marshaller.to_string(event) data = None env", "= self.compute.get_container(docker_client(), instanceData, by_agent=True) if container is None: log.info('Can not", "import time log = logging.getLogger('docker') def ns_exec(pid, event): script =", "get_type(MARSHALLER) input = marshaller.to_string(event) data = None env = {}", "cattle.agent.handler import BaseHandler from cattle.progress import Progress from cattle.type_manager import", "Config from cattle.utils import reply, popen from .compute import DockerCompute", "import subprocess import os import time log = logging.getLogger('docker') def", "import DockerCompute from cattle.agent.handler import BaseHandler from cattle.progress import Progress", "event=None, instanceData=None, **kw): if instanceData.kind != 'container' or \\ instanceData.get('token')", "= get_type(MARSHALLER) input = 
marshaller.to_string(event) data = None env =", "not len(line): continue kv = line.split('=', 1) if kv[0].startswith('CATTLE'): env[kv[0]]", "cattle import Config from cattle.utils import reply, popen from .compute", "if instanceData.kind != 'container' or \\ instanceData.get('token') is None: return", "__init__(self): self.compute = DockerCompute() pass def events(self): return ['delegate.request'] def", "def __init__(self): self.compute = DockerCompute() pass def events(self): return ['delegate.request']", "+ ['/usr/bin/test', '-e', script] if popen(exists_cmd, env=env).wait() == 0: break", "call [%s], container does not exists', instanceData.uuid) return inspect =", "cattle.type_manager import get_type, MARSHALLER from . import docker_client import subprocess", "p.poll() if retcode == 0: break exists_cmd = cmd[:-1] +", "env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error = p.communicate(input=input) retcode =", "cattle.progress import Progress from cattle.type_manager import get_type, MARSHALLER from .", "script = os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd = ['nsenter', '-F', '-m',", "1) if kv[0].startswith('CATTLE'): env[kv[0]] = kv[1] env['PATH'] = os.environ['PATH'] env['CATTLE_CONFIG_URL']", "stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error = p.communicate(input=input) retcode = p.poll()", "logging.getLogger('docker') def ns_exec(pid, event): script = os.path.join(Config.home(), 'events', event.name.split(';')[0]) cmd", "data = marshaller.from_string(line) break text.append(line) return retcode, ''.join(text), data class", "\\ instanceData.get('token') is None: return container = self.compute.get_container(docker_client(), instanceData, by_agent=True)", "is not running', instanceData.uuid) return progress = Progress(event, parent=req) exit_code,", "'-n', '-p', '-t', str(pid), '--', script] marshaller = get_type(MARSHALLER) input", 
"f.read().split('\\0'): if not len(line): continue kv = line.split('=', 1) if", "popen(exists_cmd, env=env).wait() == 0: break # Sleep and try again", "parent=req) else: progress.update('Update failed', data={ 'exitCode': exit_code, 'output': output })", "'events', event.name.split(';')[0]) cmd = ['nsenter', '-F', '-m', '-u', '-i', '-n',", "retcode: return retcode, output, None text = [] for line", "delegate_request(self, req=None, event=None, instanceData=None, **kw): if instanceData.kind != 'container' or", "by_agent=True) if container is None: log.info('Can not call [%s], container", "not call [%s], container does not exists', instanceData.uuid) return inspect", "call [%s], container is not running', instanceData.uuid) return progress =", "= Progress(event, parent=req) exit_code, output, data = ns_exec(inspect['State']['Pid'], event) if", "= popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, error = p.communicate(input=input)", "= inspect['State']['Running'] if not running: log.error('Can not call [%s], container", "= os.environ['PATH'] env['CATTLE_CONFIG_URL'] = Config.config_url() for i in range(3): p", "'--', script] marshaller = get_type(MARSHALLER) input = marshaller.to_string(event) data =", "break text.append(line) return retcode, ''.join(text), data class DockerDelegate(BaseHandler): def __init__(self):", "docker_client import subprocess import os import time log = logging.getLogger('docker')" ]
[ "calculate_historic_data. \"\"\" __metaclass__ = ABCMeta def __init__(name=\"default name\", interval=5): \"\"\"Constructor", "float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100", "if min_price > timeslice[1]: min_price = timeslice[1] closing_price = timeslice[4]", "matplotlib import pyplot as plt from exchange import cb_exchange as", "\"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance - market_performance if strategy_performance >", "self.interval = interval self.times_recalculated = 0 @abstractmethod def trade(self, timeslice):", "class strategy(object): \"\"\"`strategy` defines an abstract base strategy class. Minimum", "= float(data[0][1]) max_price = float(data[0][2]) discrete_prices = {} for timeslice", "now historic_data strarts with the oldest entry historic_data = list(reversed(historic_data))", "\"+str(strategy_performance_vs_market)+\" % worse than market.\") return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod", "pivot: num_at+=value total_volume = 0.0 for volume in fltvolumes: total_volume+=volume", "print(\"Running simulation on historic data. 
This may take some time....\")", "import cb_exchange as cb_exchange from exchange import CoinbaseExchangeAuth from abc", "import numpy from datetime import date, datetime, timedelta from matplotlib", "some time....\") for timeslice in historic_data: # Display what percent", "strategy has preformed: \"+str(strategy_performance_vs_market)+\" % worse than market.\") return strategy_performance_vs_market,", "of bitcoins traded above and below a price point, called", "= [] weights = [] if data is None: pass", "elif strategy_performance < market_performance: print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\" %", "weights.append(volume) fltprices = [float(i) for i in discrete_prices.keys()] fltvolumes =", "#import xml.utils.iso8601 import time import numpy from datetime import date,", "bonus, strategy includes utility functions like calculate_historic_data. \"\"\" __metaclass__ =", "the strategy's interval, formatted as follows: \\n[time, low, high, open,", "= float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price =", "timeslice in data: timeslice = [float(i) for i in timeslice]", "to the strategy's interval, formatted as follows: \\n[time, low, high,", "includes utility functions like calculate_historic_data. 
\"\"\" __metaclass__ = ABCMeta def", "in historic_data: # Display what percent through the data we", "+ float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price))", "and below \\ndata: a list of lists formated as follows", "max_prie = timeslice[2] if min_price > timeslice[1]: min_price = timeslice[1]", "price_list.append(closing_price) weights.append(volume) fltprices = [float(i) for i in discrete_prices.keys()] fltvolumes", "data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices = [float(i) for i in discrete_prices.keys()]", "Reverse the data since Coinbase returns it in reverse chronological", "\"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\")", "as follows [time, low, high, open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\",", "and below a price point, called a pivot.\\n \\npivot: the", "returns it in reverse chronological # now historic_data strarts with", "value = discrete_prices[key] if float(key) > pivot: num_above+=value elif float(key)", "max_price < timeslice[2]: max_prie = timeslice[2] if min_price > timeslice[1]:", "like calculate_historic_data. 
\"\"\" __metaclass__ = ABCMeta def __init__(name=\"default name\", interval=5):", "else: discrete[str(closing_price)] += volume idx = data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices", "= 0.0 for volume in fltvolumes: total_volume+=volume fltprops = []", "float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\")", "pivot): \"\"\"Returns average price weighted according to volume, and the", "time....\") for timeslice in historic_data: # Display what percent through", "worse than market.\") return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def calculate_historic_data(data,", "timeslice = [float(i) for i in timeslice] if max_price <", "= (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated:", "import ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy` defines an abstract base", "amount of time in seconds for each 'tick' default is", "timeslice. \\n`timeslice`: a section of trade data with time length", "timeslice[2]: max_prie = timeslice[2] if min_price > timeslice[1]: min_price =", "return def backtest_strategy(self, historic_data): \"\"\"Returns performance of a strategy vs", "= 0 num_at = 0 for key in discrete_prices.keys(): value", "+ float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times", "print(\"The Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's", "strategy. You can modify it as needed. \\n`interval`: a.k.a timeslice", "vs market performance. 
\"\"\" # Reverse the data since Coinbase", "bought: \"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\" %\")", "float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought:", "close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t...", "total_volume = 0.0 for volume in fltvolumes: total_volume+=volume fltprops =", "np_discrete_prices = numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)", "market_performance if strategy_performance > market_performance: print(\"Congratulations! This strategy has beat", "no trades were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value with", "containing a backtest_strategy function. As a bonus, strategy includes utility", "= float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation", "+ 1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice) # Calculate performance", "performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) +", "[] if data is None: pass min_price = float(data[0][1]) max_price", "weighted according to volume, and the number of bitcoins traded", "import pyplot as plt from exchange import cb_exchange as cb_exchange", "and the number of bitcoins traded above and below a", "\"\"\" __metaclass__ = ABCMeta def __init__(name=\"default name\", interval=5): \"\"\"Constructor for", "timeslice the amount of time in seconds for each 'tick'", "can modify it as needed. 
\\n`interval`: a.k.a timeslice the amount", "from abc import ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy` defines an", "= [float(i) for i in timeslice] if max_price < timeslice[2]:", "data is None: pass min_price = float(data[0][1]) max_price = float(data[0][2])", "has beat the market by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance <", "timeslice] if max_price < timeslice[2]: max_prie = timeslice[2] if min_price", "num_at = 0 for key in discrete_prices.keys(): value = discrete_prices[key]", "interval, formatted as follows: \\n[time, low, high, open, close, volume]", "bitcoins traded above and below a price point, called a", "market_performance: print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\" % worse than market.\")", "has preformed: \"+str(strategy_performance_vs_market)+\" % worse than market.\") return strategy_performance_vs_market, strategy_performance,", "name self.interval = interval self.times_recalculated = 0 @abstractmethod def trade(self,", "historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush()", "fltvolumes: total_volume+=volume fltprops = [] for volume in fltvolumes: fltprops.append((volume/total_volume))", "abstract strategy. You can modify it as needed. \\n`interval`: a.k.a", "exchange import CoinbaseExchangeAuth from abc import ABCMeta, abstractmethod class strategy(object):", "a list of lists formated as follows [time, low, high,", "market performance. 
\"\"\" # Reverse the data since Coinbase returns", "discrete_prices = {} for timeslice in data: timeslice = [float(i)", "timedelta from matplotlib import pyplot as plt from exchange import", "pyplot as plt from exchange import cb_exchange as cb_exchange from", "data we are idx = historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 +", "\"\"\" self.name = name self.interval = interval self.times_recalculated = 0", "import sys import pytz #import xml.utils.iso8601 import time import numpy", "[float(i) for i in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price =", "for key in discrete_prices.keys(): value = discrete_prices[key] if float(key) >", "> market_performance: print(\"Congratulations! This strategy has beat the market by:", "in seconds for each 'tick' default is 5 \\n`name`: a", "data: timeslice = [float(i) for i in timeslice] if max_price", "> pivot: num_above+=value elif float(key) < pivot: num_below+=value elif float(key)", "= timeslice[2] if min_price > timeslice[1]: min_price = timeslice[1] closing_price", "made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value with this strategy: \"+str(end_amt)+\"", "plt from exchange import cb_exchange as cb_exchange from exchange import", "0 num_below = 0 num_at = 0 for key in", "if float(key) > pivot: num_above+=value elif float(key) < pivot: num_below+=value", "\"\"\"`strategy` defines an abstract base strategy class. Minimum required to", "default is 5 \\n`name`: a string name for the strategy", "\\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... 
\\n] \"\"\"", "as cb_exchange from exchange import CoinbaseExchangeAuth from abc import ABCMeta,", "\\n`timeslice`: a section of trade data with time length equal", "in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg =", "= discrete_prices[key] if float(key) > pivot: num_above+=value elif float(key) <", "a file with a class which inherits from strategy containing", "#print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices,", "print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold))", "file with a class which inherits from strategy containing a", "calculate_historic_data(data, pivot): \"\"\"Returns average price weighted according to volume, and", "were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value with this strategy:", "operations on a timeslice. \\n`timeslice`: a section of trade data", "i in timeslice] if max_price < timeslice[2]: max_prie = timeslice[2]", "closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)] +=", "\"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops) #plt.show() return", "strategy vs market performance. \"\"\" # Reverse the data since", "by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance < market_performance: print(\"This strategy has", "for an abstract strategy. You can modify it as needed.", "simulation on historic data. 
This may take some time....\") for", "float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) +", "You can modify it as needed. \\n`interval`: a.k.a timeslice the", "preformed: \"+str(strategy_performance_vs_market)+\" % worse than market.\") return strategy_performance_vs_market, strategy_performance, market_performance", "\"\"\"Returns average price weighted according to volume, and the number", "data since Coinbase returns it in reverse chronological # now", "below \\ndata: a list of lists formated as follows [time,", "[time, low, high, open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\",", "# now historic_data strarts with the oldest entry historic_data =", "time import numpy from datetime import date, datetime, timedelta from", "i in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg", "num_above+=value elif float(key) < pivot: num_below+=value elif float(key) == pivot:", "BTC\") print(\"Account's ending value with this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market", "strategy \"\"\" self.name = name self.interval = interval self.times_recalculated =", "= ABCMeta def __init__(name=\"default name\", interval=5): \"\"\"Constructor for an abstract", "through the data we are idx = historic_data.index(timeslice) percent =", "from matplotlib import pyplot as plt from exchange import cb_exchange", "# Display what percent through the data we are idx", "low, high, open, close, volume] \"\"\" return def backtest_strategy(self, historic_data):", "follows: \\n[time, low, high, open, close, volume] \"\"\" return def", "percent) sys.stdout.flush() self.trade(timeslice) # Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) +", "volume else: discrete[str(closing_price)] += volume idx = 
data.index(timeslice) price_list.append(closing_price) weights.append(volume)", "self.times_recalculated = 0 @abstractmethod def trade(self, timeslice): \"\"\"Perform operations on", "which inherits from strategy containing a backtest_strategy function. As a", "data. This may take some time....\") for timeslice in historic_data:", "= (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt", "for returning volume above and below \\ndata: a list of", "end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)", "traded above and below a price point, called a pivot.\\n", "with a class which inherits from strategy containing a backtest_strategy", "in fltvolumes: total_volume+=volume fltprops = [] for volume in fltvolumes:", "Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price))", "pivot: num_above+=value elif float(key) < pivot: num_below+=value elif float(key) ==", "take some time....\") for timeslice in historic_data: # Display what", "float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation on", "#plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops) #plt.show() return weighted_avg, num_above, num_below", "for i in discrete_prices.keys()] fltvolumes = [float(i) for i in", "above and below \\ndata: a list of lists formated as", "+= volume idx = data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices = [float(i)", "recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) 
print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's", "needed. \\n`interval`: a.k.a timeslice the amount of time in seconds", "= (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance", "numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0 num_below = 0 num_at =", "create a strategy is a file with a class which", "low, high, open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\",", "22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... \\n] \"\"\" price_list", "= float(data[0][2]) discrete_prices = {} for timeslice in data: timeslice", "\"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... \\n] \"\"\" price_list = []", "Coinbase returns it in reverse chronological # now historic_data strarts", "datetime import date, datetime, timedelta from matplotlib import pyplot as", "= name self.interval = interval self.times_recalculated = 0 @abstractmethod def", "returning volume above and below \\ndata: a list of lists", "num_below = 0 num_at = 0 for key in discrete_prices.keys():", "the price used for returning volume above and below \\ndata:", "i in discrete_prices.keys()] fltvolumes = [float(i) for i in discrete_prices.values()]", "ABCMeta def __init__(name=\"default name\", interval=5): \"\"\"Constructor for an abstract strategy.", "abstract base strategy class. Minimum required to create a strategy", "from exchange import cb_exchange as cb_exchange from exchange import CoinbaseExchangeAuth", "def trade(self, timeslice): \"\"\"Perform operations on a timeslice. 
\\n`timeslice`: a", "= ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times", "% percent) sys.stdout.flush() self.trade(timeslice) # Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price))", "This strategy has beat the market by: \"+str(strategy_performance_vs_market)+\" %\") elif", "list of lists formated as follows [time, low, high, open,", "Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending", "None: pass min_price = float(data[0][1]) max_price = float(data[0][2]) discrete_prices =", "exchange import cb_exchange as cb_exchange from exchange import CoinbaseExchangeAuth from", "min_price > timeslice[1]: min_price = timeslice[1] closing_price = timeslice[4] volume", "of lists formated as follows [time, low, high, open, close]", "datetime, timedelta from matplotlib import pyplot as plt from exchange", "__init__(name=\"default name\", interval=5): \"\"\"Constructor for an abstract strategy. 
You can", "float(key) < pivot: num_below+=value elif float(key) == pivot: num_at+=value total_volume", "< timeslice[2]: max_prie = timeslice[2] if min_price > timeslice[1]: min_price", "beat the market by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance < market_performance:", "# Reverse the data since Coinbase returns it in reverse", "(float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt =", "name for the strategy \"\"\" self.name = name self.interval =", "in discrete_prices.keys(): value = discrete_prices[key] if float(key) > pivot: num_above+=value", "idx = data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices = [float(i) for i", "def calculate_historic_data(data, pivot): \"\"\"Returns average price weighted according to volume,", "((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times sold:", "\"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance:", "\"\"\"Perform operations on a timeslice. \\n`timeslice`: a section of trade", "= 0 for key in discrete_prices.keys(): value = discrete_prices[key] if", "time length equal to the strategy's interval, formatted as follows:", "sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\"", "a backtest_strategy function. 
As a bonus, strategy includes utility functions", "\"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance < market_performance: print(\"This strategy has preformed:", "in data: timeslice = [float(i) for i in timeslice] if", "float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price = float(historic_data[-1][4])", "volume above and below \\ndata: a list of lists formated", "historic_data = list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price", "# Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt =", "chronological # now historic_data strarts with the oldest entry historic_data", "called a pivot.\\n \\npivot: the price used for returning volume", "high, open, close, volume] \"\"\" return def backtest_strategy(self, historic_data): \"\"\"Returns", "%\") elif strategy_performance < market_performance: print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\"", "inherits from strategy containing a backtest_strategy function. As a bonus,", "= {} for timeslice in data: timeslice = [float(i) for", "on historic data. This may take some time....\") for timeslice", "strategy containing a backtest_strategy function. As a bonus, strategy includes", "elif float(key) < pivot: num_below+=value elif float(key) == pivot: num_at+=value", "{} for timeslice in data: timeslice = [float(i) for i", "\"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending value if", "distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops) #plt.show() return weighted_avg, num_above,", "\"\"\"Constructor for an abstract strategy. 
You can modify it as", "percent through the data we are idx = historic_data.index(timeslice) percent", "strategy includes utility functions like calculate_historic_data. \"\"\" __metaclass__ = ABCMeta", "seconds for each 'tick' default is 5 \\n`name`: a string", "print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\"", "\"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\")", "on a timeslice. \\n`timeslice`: a section of trade data with", "open, close, volume] \"\"\" return def backtest_strategy(self, historic_data): \"\"\"Returns performance", "follows [time, low, high, open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\",", "an abstract strategy. You can modify it as needed. \\n`interval`:", "in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average:", "max_price = float(data[0][2]) discrete_prices = {} for timeslice in data:", "#print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\")", "0 @abstractmethod def trade(self, timeslice): \"\"\"Perform operations on a timeslice.", "defines an abstract base strategy class. Minimum required to create", "strategy class. Minimum required to create a strategy is a", "latest_time = float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance", "class which inherits from strategy containing a backtest_strategy function. 
As", "0.0 for volume in fltvolumes: total_volume+=volume fltprops = [] for", "if strategy_performance > market_performance: print(\"Congratulations! This strategy has beat the", "float(data[0][2]) discrete_prices = {} for timeslice in data: timeslice =", "cb_exchange from exchange import CoinbaseExchangeAuth from abc import ABCMeta, abstractmethod", "historic_data): \"\"\"Returns performance of a strategy vs market performance. \"\"\"", "trades were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value with this", "float(key) == pivot: num_at+=value total_volume = 0.0 for volume in", "%\") print(\"Account's ending value if no trades were made: \"+str(end_amt_no_trades)+\"", "open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"],", "[float(i) for i in discrete_prices.keys()] fltvolumes = [float(i) for i", "= list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price =", "This may take some time....\") for timeslice in historic_data: #", "volume idx = data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices = [float(i) for", "import CoinbaseExchangeAuth from abc import ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy`", "strategy(object): \"\"\"`strategy` defines an abstract base strategy class. 
Minimum required", "pivot.\\n \\npivot: the price used for returning volume above and", "discrete_prices[key] if float(key) > pivot: num_above+=value elif float(key) < pivot:", "number of bitcoins traded above and below a price point,", "length equal to the strategy's interval, formatted as follows: \\n[time,", "\"\"\" return def backtest_strategy(self, historic_data): \"\"\"Returns performance of a strategy", "may take some time....\") for timeslice in historic_data: # Display", "num_at+=value total_volume = 0.0 for volume in fltvolumes: total_volume+=volume fltprops", "(float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance =", "string name for the strategy \"\"\" self.name = name self.interval", "strategy_performance - market_performance if strategy_performance > market_performance: print(\"Congratulations! This strategy", "fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price", "pytz #import xml.utils.iso8601 import time import numpy from datetime import", "= numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above", "timeslice[4] volume = timeslice[5] if closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)]", "with the oldest entry historic_data = list(reversed(historic_data)) earliest_time = float(historic_data[0][0])", "utility functions like calculate_historic_data. 
\"\"\" __metaclass__ = ABCMeta def __init__(name=\"default", "value with this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance -", "@staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns average price weighted according to", "pass min_price = float(data[0][1]) max_price = float(data[0][2]) discrete_prices = {}", "an abstract base strategy class. Minimum required to create a", "end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)", "cb_exchange as cb_exchange from exchange import CoinbaseExchangeAuth from abc import", "market.\") return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns", "BTC\") strategy_performance_vs_market = strategy_performance - market_performance if strategy_performance > market_performance:", "(float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated))", "= historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" % percent)", "- market_performance if strategy_performance > market_performance: print(\"Congratulations! 
This strategy has", "it in reverse chronological # now historic_data strarts with the", "num_above = 0 num_below = 0 num_at = 0 for", "price_list = [] weights = [] if data is None:", "to volume, and the number of bitcoins traded above and", "a string name for the strategy \"\"\" self.name = name", "the data we are idx = historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100", "the data since Coinbase returns it in reverse chronological #", "print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance:", "total_volume+=volume fltprops = [] for volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below:", "< pivot: num_below+=value elif float(key) == pivot: num_at+=value total_volume =", "= float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation on historic data.", "fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg))", "formatted as follows: \\n[time, low, high, open, close, volume] \"\"\"", "historic_data strarts with the oldest entry historic_data = list(reversed(historic_data)) earliest_time", "fltprops = [] for volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below))", "\\n[time, low, high, open, close, volume] \"\"\" return def backtest_strategy(self,", "+ float(self.exchange.btc_bal) start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100", "\\n`interval`: a.k.a timeslice the amount of time in seconds for", "as needed. 
\\n`interval`: a.k.a timeslice the amount of time in", "\\n`name`: a string name for the strategy \"\"\" self.name =", "print(\"Account's ending value if no trades were made: \"+str(end_amt_no_trades)+\" BTC\")", "#print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops) #plt.show()", "strategy_performance, market_performance @staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns average price weighted", "strategy is a file with a class which inherits from", "end_price = float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation on historic", "a strategy vs market performance. \"\"\" # Reverse the data", "\"4.2\", \"12.3\"], \\n\\t\\t... \\n] \"\"\" price_list = [] weights =", "performance: \"+str(market_performance)+\" %\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending value", "if no trades were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value", "list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price = float(historic_data[0][4])", "required to create a strategy is a file with a", "as plt from exchange import cb_exchange as cb_exchange from exchange", "the oldest entry historic_data = list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time", "name\", interval=5): \"\"\"Constructor for an abstract strategy. You can modify", "section of trade data with time length equal to the", "numpy from datetime import date, datetime, timedelta from matplotlib import", "abstractmethod class strategy(object): \"\"\"`strategy` defines an abstract base strategy class.", "trade(self, timeslice): \"\"\"Perform operations on a timeslice. 
\\n`timeslice`: a section", "= data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices = [float(i) for i in", "#print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\")", "def __init__(name=\"default name\", interval=5): \"\"\"Constructor for an abstract strategy. You", "[] weights = [] if data is None: pass min_price", "functions like calculate_historic_data. \"\"\" __metaclass__ = ABCMeta def __init__(name=\"default name\",", "\\n\\t\\t... \\n] \"\"\" price_list = [] weights = [] if", "strarts with the oldest entry historic_data = list(reversed(historic_data)) earliest_time =", "a strategy is a file with a class which inherits", "weights=np_volume_per_price) num_above = 0 num_below = 0 num_at = 0", "discrete[str(closing_price)] += volume idx = data.index(timeslice) price_list.append(closing_price) weights.append(volume) fltprices =", "timeslice in historic_data: # Display what percent through the data", "volume] \"\"\" return def backtest_strategy(self, historic_data): \"\"\"Returns performance of a", "discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)] += volume idx = data.index(timeslice)", "\\n] \"\"\" price_list = [] weights = [] if data", "as follows: \\n[time, low, high, open, close, volume] \"\"\" return", "fltprices = [float(i) for i in discrete_prices.keys()] fltvolumes = [float(i)", "% worse than market.\") return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def", "for i in timeslice] if max_price < timeslice[2]: max_prie =", "point, called a pivot.\\n \\npivot: the price used for returning", "min_price = float(data[0][1]) max_price = float(data[0][2]) discrete_prices = {} for", "in timeslice] if max_price < timeslice[2]: max_prie = timeslice[2] if", "[float(i) for i in timeslice] if max_price < timeslice[2]: max_prie", 
"strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns average price", "market_performance @staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns average price weighted according", "function. As a bonus, strategy includes utility functions like calculate_historic_data.", "\"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The Market's performance: \"+str(market_performance)+\" %\") print(\"Strategy's", "timeslice[1] closing_price = timeslice[4] volume = timeslice[5] if closing_price not", "= volume else: discrete[str(closing_price)] += volume idx = data.index(timeslice) price_list.append(closing_price)", "\\npivot: the price used for returning volume above and below", "each 'tick' default is 5 \\n`name`: a string name for", "\"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price", "a section of trade data with time length equal to", "volume in fltvolumes: total_volume+=volume fltprops = [] for volume in", "strategy's interval, formatted as follows: \\n[time, low, high, open, close,", "idx = historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" %", "with this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance - market_performance", "discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices,", "= ((end_price-start_price)/start_price)*100 print(\"Running simulation on historic data. 
This may take", "= [] for volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above:", "sys.stdout.flush() self.trade(timeslice) # Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)", "[] for volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above))", "a.k.a timeslice the amount of time in seconds for each", "pivot: num_below+=value elif float(key) == pivot: num_at+=value total_volume = 0.0", "= timeslice[5] if closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume", "percent = (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice)", "price used for returning volume above and below \\ndata: a", "closing_price = timeslice[4] volume = timeslice[5] if closing_price not in", "== pivot: num_at+=value total_volume = 0.0 for volume in fltvolumes:", "value if no trades were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending", "\"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... 
\\n] \"\"\" price_list = [] weights", "= 0 @abstractmethod def trade(self, timeslice): \"\"\"Perform operations on a", "strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought))", "in discrete_prices.keys()] fltvolumes = [float(i) for i in discrete_prices.values()] np_discrete_prices", "__metaclass__ = ABCMeta def __init__(name=\"default name\", interval=5): \"\"\"Constructor for an", "average price weighted according to volume, and the number of", "print(\"Times recalculated: \"+str(self.times_recalculated)) print(\"Times bought: \"+str(self.exchange.times_bought)) print(\"Times sold: \"+str(self.exchange.times_sold)) print(\"The", "\"12.3\"], \\n\\t\\t... \\n] \"\"\" price_list = [] weights = []", "sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice) # Calculate performance end_amt_no_trades =", "for timeslice in data: timeslice = [float(i) for i in", "%\") print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending value if no", "ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy` defines an abstract base strategy", "backtest_strategy(self, historic_data): \"\"\"Returns performance of a strategy vs market performance.", "what percent through the data we are idx = historic_data.index(timeslice)", "\"\"\"Returns performance of a strategy vs market performance. 
\"\"\" #", "'tick' default is 5 \\n`name`: a string name for the", "for the strategy \"\"\" self.name = name self.interval = interval", "close, volume] \"\"\" return def backtest_strategy(self, historic_data): \"\"\"Returns performance of", "import time import numpy from datetime import date, datetime, timedelta", "timeslice[5] if closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else:", "= strategy_performance - market_performance if strategy_performance > market_performance: print(\"Congratulations! This", "float(data[0][1]) max_price = float(data[0][2]) discrete_prices = {} for timeslice in", "high, open, close] \\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\",", "print(\"Congratulations! This strategy has beat the market by: \"+str(strategy_performance_vs_market)+\" %\")", "start_price = float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running", "= (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice) #", "num_below+=value elif float(key) == pivot: num_at+=value total_volume = 0.0 for", "\"\"\" # Reverse the data since Coinbase returns it in", "strategy_performance_vs_market = strategy_performance - market_performance if strategy_performance > market_performance: print(\"Congratulations!", "0 num_at = 0 for key in discrete_prices.keys(): value =", "historic data. 
This may take some time....\") for timeslice in", "print(\"Account's ending value with this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market =", "date, datetime, timedelta from matplotlib import pyplot as plt from", "equal to the strategy's interval, formatted as follows: \\n[time, low,", "numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above =", "@abstractmethod def trade(self, timeslice): \"\"\"Perform operations on a timeslice. \\n`timeslice`:", "key in discrete_prices.keys(): value = discrete_prices[key] if float(key) > pivot:", "\"+str(strategy_performance)+\" %\") print(\"Account's ending value if no trades were made:", "float(historic_data[-1][4]) market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation on historic data. This", "modify it as needed. \\n`interval`: a.k.a timeslice the amount of", "= [float(i) for i in discrete_prices.keys()] fltvolumes = [float(i) for", "0 for key in discrete_prices.keys(): value = discrete_prices[key] if float(key)", "weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0 num_below = 0", "\"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... \\n] \"\"\" price_list =", "according to volume, and the number of bitcoins traded above", "\"\"\" price_list = [] weights = [] if data is", "strategy_performance > market_performance: print(\"Congratulations! This strategy has beat the market", "trade data with time length equal to the strategy's interval,", "Minimum required to create a strategy is a file with", "timeslice[2] if min_price > timeslice[1]: min_price = timeslice[1] closing_price =", "performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending value if no trades were", "a bonus, strategy includes utility functions like calculate_historic_data. 
\"\"\" __metaclass__", "the strategy \"\"\" self.name = name self.interval = interval self.times_recalculated", "strategy_performance < market_performance: print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\" % worse", "((end_price-start_price)/start_price)*100 print(\"Running simulation on historic data. This may take some", "price point, called a pivot.\\n \\npivot: the price used for", "start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc) strategy_performance = ((end_amt-start_amt)/start_amt)*100 print(\"\\n\") print(\"Times", "not in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)] += volume", "As a bonus, strategy includes utility functions like calculate_historic_data. \"\"\"", "for each 'tick' default is 5 \\n`name`: a string name", "performance. \"\"\" # Reverse the data since Coinbase returns it", "base strategy class. Minimum required to create a strategy is", "(float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice) # Calculate", "market_performance: print(\"Congratulations! This strategy has beat the market by: \"+str(strategy_performance_vs_market)+\"", "below a price point, called a pivot.\\n \\npivot: the price", "1 sys.stdout.write(\"\\r%d%%\" % percent) sys.stdout.flush() self.trade(timeslice) # Calculate performance end_amt_no_trades", "elif float(key) == pivot: num_at+=value total_volume = 0.0 for volume", "5 \\n`name`: a string name for the strategy \"\"\" self.name", "= 0 num_below = 0 num_at = 0 for key", "of a strategy vs market performance. 
\"\"\" # Reverse the", "historic_data: # Display what percent through the data we are", "ending value with this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance", "in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)] += volume idx", "import pytz #import xml.utils.iso8601 import time import numpy from datetime", "\"+str(num_at)) #print(\"weighted_average: \"+str(weighted_avg)) #plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops)", "reverse chronological # now historic_data strarts with the oldest entry", "< market_performance: print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\" % worse than", "interval self.times_recalculated = 0 @abstractmethod def trade(self, timeslice): \"\"\"Perform operations", "with time length equal to the strategy's interval, formatted as", "> timeslice[1]: min_price = timeslice[1] closing_price = timeslice[4] volume =", "\"+str(end_amt_no_trades)+\" BTC\") print(\"Account's ending value with this strategy: \"+str(end_amt)+\" BTC\")", "if data is None: pass min_price = float(data[0][1]) max_price =", "import date, datetime, timedelta from matplotlib import pyplot as plt", "to create a strategy is a file with a class", "for volume in fltvolumes: total_volume+=volume fltprops = [] for volume", "float(key) > pivot: num_above+=value elif float(key) < pivot: num_below+=value elif", "from strategy containing a backtest_strategy function. 
As a bonus, strategy", "return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def calculate_historic_data(data, pivot): \"\"\"Returns average", "= interval self.times_recalculated = 0 @abstractmethod def trade(self, timeslice): \"\"\"Perform", "data with time length equal to the strategy's interval, formatted", "= [float(i) for i in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price", "from exchange import CoinbaseExchangeAuth from abc import ABCMeta, abstractmethod class", "entry historic_data = list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time = float(historic_data[-1][0])", "a timeslice. \\n`timeslice`: a section of trade data with time", "than market.\") return strategy_performance_vs_market, strategy_performance, market_performance @staticmethod def calculate_historic_data(data, pivot):", "formated as follows [time, low, high, open, close] \\n[ \\n\\t[\"2014-11-07", "np_volume_per_price = numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0", "discrete_prices.keys(): value = discrete_prices[key] if float(key) > pivot: num_above+=value elif", "since Coinbase returns it in reverse chronological # now historic_data", "CoinbaseExchangeAuth from abc import ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy` defines", "for i in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices) np_volume_per_price = numpy.array(fltvolumes)", "are idx = historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 + 1 sys.stdout.write(\"\\r%d%%\"", "time in seconds for each 'tick' default is 5 \\n`name`:", "this strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance - market_performance if", "strategy: \"+str(end_amt)+\" BTC\") strategy_performance_vs_market = strategy_performance - market_performance if 
strategy_performance", "earliest_time = float(historic_data[0][0]) latest_time = float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price", "self.trade(timeslice) # Calculate performance end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc) end_amt", "def backtest_strategy(self, historic_data): \"\"\"Returns performance of a strategy vs market", "discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)] += volume idx =", "= numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0 num_below = 0 num_at", "a price point, called a pivot.\\n \\npivot: the price used", "\\ndata: a list of lists formated as follows [time, low,", "for volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at:", "is None: pass min_price = float(data[0][1]) max_price = float(data[0][2]) discrete_prices", "volume in fltvolumes: fltprops.append((volume/total_volume)) #print(\"num_below: \"+str(num_below)) #print(\"num_above: \"+str(num_above)) #print(\"num_at: \"+str(num_at))", "print(\"Strategy's performance: \"+str(strategy_performance)+\" %\") print(\"Account's ending value if no trades", "is 5 \\n`name`: a string name for the strategy \"\"\"", "used for returning volume above and below \\ndata: a list", "is a file with a class which inherits from strategy", "\\n[ \\n\\t[\"2014-11-07 22:19:28.578544+00\", \"0.32\", \"4.2\", \"0.35\", \"4.2\", \"12.3\"], \\n\\t\\t... 
\\n]", "the number of bitcoins traded above and below a price", "sys import pytz #import xml.utils.iso8601 import time import numpy from", "timeslice[1]: min_price = timeslice[1] closing_price = timeslice[4] volume = timeslice[5]", "if max_price < timeslice[2]: max_prie = timeslice[2] if min_price >", "abc import ABCMeta, abstractmethod class strategy(object): \"\"\"`strategy` defines an abstract", "= timeslice[1] closing_price = timeslice[4] volume = timeslice[5] if closing_price", "the market by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance < market_performance: print(\"This", "#plt.title(\"Price distribution\") #plt.xlabel(\"Price (USD)\") #plt.ylabel(\"Volume\") #plt.bar(fltprices, fltprops) #plt.show() return weighted_avg,", "interval=5): \"\"\"Constructor for an abstract strategy. You can modify it", "of time in seconds for each 'tick' default is 5", "volume = timeslice[5] if closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)] =", "market_performance = ((end_price-start_price)/start_price)*100 print(\"Running simulation on historic data. This may", "ending value if no trades were made: \"+str(end_amt_no_trades)+\" BTC\") print(\"Account's", "Display what percent through the data we are idx =", "volume, and the number of bitcoins traded above and below", "price weighted according to volume, and the number of bitcoins", "lists formated as follows [time, low, high, open, close] \\n[", "it as needed. \\n`interval`: a.k.a timeslice the amount of time", "strategy has beat the market by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance", "we are idx = historic_data.index(timeslice) percent = (float(idx)/float(len(historic_data)))*100 + 1", "for timeslice in historic_data: # Display what percent through the", "backtest_strategy function. As a bonus, strategy includes utility functions like", "performance of a strategy vs market performance. 
\"\"\" # Reverse", "a pivot.\\n \\npivot: the price used for returning volume above", "oldest entry historic_data = list(reversed(historic_data)) earliest_time = float(historic_data[0][0]) latest_time =", "= [] if data is None: pass min_price = float(data[0][1])", "self.name = name self.interval = interval self.times_recalculated = 0 @abstractmethod", "min_price = timeslice[1] closing_price = timeslice[4] volume = timeslice[5] if", "discrete_prices.keys()] fltvolumes = [float(i) for i in discrete_prices.values()] np_discrete_prices =", "class. Minimum required to create a strategy is a file", "a class which inherits from strategy containing a backtest_strategy function.", "in reverse chronological # now historic_data strarts with the oldest", "the amount of time in seconds for each 'tick' default", "of trade data with time length equal to the strategy's", "market by: \"+str(strategy_performance_vs_market)+\" %\") elif strategy_performance < market_performance: print(\"This strategy", "print(\"This strategy has preformed: \"+str(strategy_performance_vs_market)+\" % worse than market.\") return", "timeslice): \"\"\"Perform operations on a timeslice. 
\\n`timeslice`: a section of", "= numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0 num_below", "weights = [] if data is None: pass min_price =", "above and below a price point, called a pivot.\\n \\npivot:", "if closing_price not in discrete_prices.keys(): discrete_prices[str(closing_price)] = volume else: discrete[str(closing_price)]", "= float(historic_data[-1][0]) start_price = float(historic_data[0][4]) end_price = float(historic_data[-1][4]) market_performance =", "= timeslice[4] volume = timeslice[5] if closing_price not in discrete_prices.keys():", "xml.utils.iso8601 import time import numpy from datetime import date, datetime,", "fltvolumes = [float(i) for i in discrete_prices.values()] np_discrete_prices = numpy.array(fltprices)", "from datetime import date, datetime, timedelta from matplotlib import pyplot", "numpy.array(fltvolumes) weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price) num_above = 0 num_below =" ]
[ "0 else 0 def linear_model(inputs, weights): return np.dot(inputs, weights) return", "np # Perceptron def predict_perceptron(inputs, weights): if np.dot(inputs, weights) >", "predict_perceptron_proper(inputs, weights): def step_function(input): return 1 if input > 0", "def step_function(input): return 1 if input > 0 else 0", "if np.dot(inputs, weights) > 0: return 1 else: return 0", "import numpy as np # Perceptron def predict_perceptron(inputs, weights): if", "weights): def step_function(input): return 1 if input > 0 else", "1 if input > 0 else 0 def linear_model(inputs, weights):", "def predict_perceptron_proper(inputs, weights): def step_function(input): return 1 if input >", "def linear_model(inputs, weights): return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights)) neural_network", "np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights)) neural_network = neuron(neuron(inputs, weights1), weights2)", "linear_model(inputs, weights): return np.dot(inputs, weights) return step_function(linear_model(inputs, weights)) def neuron(inputs,", "numpy as np # Perceptron def predict_perceptron(inputs, weights): if np.dot(inputs,", "input)) def linear_model(inputs, weights): return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights))", "if input > 0 else 0 def linear_model(inputs, weights): return", "0 def linear_model(inputs, weights): return np.dot(inputs, weights) return step_function(linear_model(inputs, weights))", "step_function(input): return 1 if input > 0 else 0 def", "<filename>neural-networks.py import numpy as np # Perceptron def predict_perceptron(inputs, weights):", "return np.dot(inputs, weights) return step_function(linear_model(inputs, weights)) def neuron(inputs, weights): def", "def sigmoid_function(input): return 1 / (1 + np.exp(-1 * input))", "return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights)) neural_network = neuron(neuron(inputs, 
weights1),", "> 0: return 1 else: return 0 def predict_perceptron_proper(inputs, weights):", "0: return 1 else: return 0 def predict_perceptron_proper(inputs, weights): def", "return 1 if input > 0 else 0 def linear_model(inputs,", "def linear_model(inputs, weights): return np.dot(inputs, weights) return step_function(linear_model(inputs, weights)) def", "return 1 / (1 + np.exp(-1 * input)) def linear_model(inputs,", "# Perceptron def predict_perceptron(inputs, weights): if np.dot(inputs, weights) > 0:", "> 0 else 0 def linear_model(inputs, weights): return np.dot(inputs, weights)", "weights)) def neuron(inputs, weights): def sigmoid_function(input): return 1 / (1", "return 0 def predict_perceptron_proper(inputs, weights): def step_function(input): return 1 if", "as np # Perceptron def predict_perceptron(inputs, weights): if np.dot(inputs, weights)", "predict_perceptron(inputs, weights): if np.dot(inputs, weights) > 0: return 1 else:", "neuron(inputs, weights): def sigmoid_function(input): return 1 / (1 + np.exp(-1", "* input)) def linear_model(inputs, weights): return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs,", "weights): return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights)) neural_network = neuron(neuron(inputs,", "weights): return np.dot(inputs, weights) return step_function(linear_model(inputs, weights)) def neuron(inputs, weights):", "0 def predict_perceptron_proper(inputs, weights): def step_function(input): return 1 if input", "+ np.exp(-1 * input)) def linear_model(inputs, weights): return np.dot(inputs, weights)", "else 0 def linear_model(inputs, weights): return np.dot(inputs, weights) return step_function(linear_model(inputs,", "(1 + np.exp(-1 * input)) def linear_model(inputs, weights): return np.dot(inputs,", "weights) return step_function(linear_model(inputs, weights)) def neuron(inputs, weights): def sigmoid_function(input): return", "np.dot(inputs, weights) > 0: return 1 else: return 0 def", 
"return step_function(linear_model(inputs, weights)) def neuron(inputs, weights): def sigmoid_function(input): return 1", "sigmoid_function(input): return 1 / (1 + np.exp(-1 * input)) def", "linear_model(inputs, weights): return np.dot(inputs, weights) return sigmoid_function(linear_model(inputs, weights)) neural_network =", "else: return 0 def predict_perceptron_proper(inputs, weights): def step_function(input): return 1", "weights): def sigmoid_function(input): return 1 / (1 + np.exp(-1 *", "weights): if np.dot(inputs, weights) > 0: return 1 else: return", "Perceptron def predict_perceptron(inputs, weights): if np.dot(inputs, weights) > 0: return", "weights) > 0: return 1 else: return 0 def predict_perceptron_proper(inputs,", "step_function(linear_model(inputs, weights)) def neuron(inputs, weights): def sigmoid_function(input): return 1 /", "np.exp(-1 * input)) def linear_model(inputs, weights): return np.dot(inputs, weights) return", "def predict_perceptron(inputs, weights): if np.dot(inputs, weights) > 0: return 1", "np.dot(inputs, weights) return step_function(linear_model(inputs, weights)) def neuron(inputs, weights): def sigmoid_function(input):", "1 / (1 + np.exp(-1 * input)) def linear_model(inputs, weights):", "return 1 else: return 0 def predict_perceptron_proper(inputs, weights): def step_function(input):", "1 else: return 0 def predict_perceptron_proper(inputs, weights): def step_function(input): return", "input > 0 else 0 def linear_model(inputs, weights): return np.dot(inputs,", "def neuron(inputs, weights): def sigmoid_function(input): return 1 / (1 +", "/ (1 + np.exp(-1 * input)) def linear_model(inputs, weights): return" ]
[ "z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name) writer.add_image('samples', sample_sheet, 0)", "pre-trained G if necessary download_G() config[\"G_path\"] = 'checkpoints/138k' G, state_dict,", "main(): # parse command line and run parser = utils.prepare_parser()", "optim writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G,", "torch.nn as nn import torch.optim import utils import train_fns from", "direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim writer = SummaryWriter('%s/%s'", "= torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w penalty", "cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions and log visuals:", "mostly track G iterations. iters_per_epoch = 1000 dummy_loader = [None]", "writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24) for epoch in range(state_dict['epoch'],", "interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_()", "as nn import torch.optim import utils import train_fns from sync_batchnorm", "Save directions and log visuals: if not (state_dict['itr'] % config['save_every']):", "if config['wandb_entity'] is not None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if", "# If search_space != 'all', then we need to pad", "print('Initializing with random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)", "config['fix_class'] is None: y_.sample_() y = G.module.G.shared(y_) # OroJaR taken", "visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1)", "state_dict['itr'] += 1 z_.sample_() if 
config['fix_class'] is None: y_.sample_() y", "A for visualizations and the next training iteration: Q =", "Q=Q, high_quality=False, npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix,", "to pad the z components that we are leaving alone:", "# Log metrics to TensorBoard/WandB: cur_training_iter = epoch * iters_per_epoch", "of directions we want to learn if config['load_A'] == 'coords':", "G is left frozen during training: optim = torch.optim.Adam(params=[A], lr=config['A_lr'])", "config['path_size'] # Simply stores a |z|-dimensional one-hot vector indicating each", "writer.add_image('samples', sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y) norm_fn = norm #", "z: w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot", "for epoch in range(state_dict['epoch'], config['num_epochs']): if config['pbar'] == 'mine': pbar", "interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] += 1 def main(): # parse", "to see individual sample evolution throghout training fixed_z, fixed_y =", "high_quality=False, npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix],", "import utils import train_fns from sync_batchnorm import patch_replication_callback from torch.utils.tensorboard", "standard basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif", "writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions and", "Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) # Log", "this training run. 
def run(config): if config['wandb_entity'] is not None:", "fps=24) for epoch in range(state_dict['epoch'], config['num_epochs']): if config['pbar'] == 'mine':", "self).__init__() self.G = G def forward(self, z, y, w, Q):", "\"\"\" def __init__(self, G): super(DataParallelLoss, self).__init__() self.G = G def", "a normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A)", "ndirs indicates the number of directions we want to learn", "penalty.backward() optim.step() # re-orthogonalize A for visualizations and the next", "import torch import torch.nn as nn import torch.optim import utils", "def run(config): if config['wandb_entity'] is not None: init_wandb(config, config['experiment_name'], config['wandb_entity'],", "G.module.G, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name) writer.add_image('samples', sample_sheet,", "# We only learn A; G is left frozen during", "= SummaryWriter('%s/%s' % (config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G,", "the next training iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"]", "'all', then we need to pad the z components that", "get_direction_padding_fn, init_wandb, download_G from layers import fast_gram_schmidt, norm class DataParallelLoss(nn.Module):", "w_sampled, NOT z: w = torch.zeros((G_batch_size, config['ndirs'])) # equal to", "Config is a dictionary specifying the configuration # of this", "= torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for different batch sizes in", "directions using a pre-trained BigGAN Generator. 
Modified from train.py in", "pad(A) # Log metrics to TensorBoard/WandB: cur_training_iter = epoch *", "if config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using {num_gpus} GPUs') #", "is a dictionary specifying the configuration # of this training", "run(config): if config['wandb_entity'] is not None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')", "config['load_A'] == 'random': print('Initializing with random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'],", "= utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__ ==", "if not config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\")", "# Prepare a fixed z & y to see individual", "cur_training_iter, fps=24) state_dict['epoch'] += 1 def main(): # parse command", "G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z &", "import train_fns from sync_batchnorm import patch_replication_callback from torch.utils.tensorboard import SummaryWriter", "Generator. Modified from train.py in the PyTorch BigGAN repo. \"\"\"", "matrix of directions, where ndirs indicates the number of directions", "fixed_z, fixed_y, state_dict, config, experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_ =", "1 z_.sample_() if config['fix_class'] is None: y_.sample_() y = G.module.G.shared(y_)", "fixed z & y to see individual sample evolution throghout", "norm_fn = norm # Make directions orthonormal via Gram Schmidt", "# of this training run. def run(config): if config['wandb_entity'] is", "= utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta') else: pbar =", "'mine': pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta') else:", "from train.py in the PyTorch BigGAN repo. 
\"\"\" import os", "fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at epoch", "w_ix, interp_vis[w_ix], 0, fps=24) for epoch in range(state_dict['epoch'], config['num_epochs']): if", "torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w penalty =", "w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w", "pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) # Log metrics to", "fixed_y, state_dict, config, experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y)", "repo. \"\"\" import os from tqdm import tqdm import torch", "initial visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False,", "for different batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size'])", "GPUs') # If search_space != 'all', then we need to", "Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]:", "interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1) for w_ix in range(config['ndirs']):", "else config['ndirs'] # A is our (ndirs, |z|) matrix of", "main training file. 
Config is a dictionary specifying the configuration", "# Make directions orthonormal via Gram Schmidt followed a normalization:", "torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError # We", "= pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]: print(\"Generating", "= y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(),", "= G.module.G.shared(interp_y) norm_fn = norm # Make directions orthonormal via", "None: # Download a pre-trained G if necessary download_G() config[\"G_path\"]", "is None: # Download a pre-trained G if necessary download_G()", "experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y) norm_fn = norm", "of directions, where ndirs indicates the number of directions we", "Q): penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False)", "log visuals: if not (state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' %", "w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] += 1 def main(): #", "epoch * iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(),", "y = G.module.G.shared(y_) # OroJaR taken w.r.t. 
w_sampled, NOT z:", "directions and log visuals: if not (state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(),", "w, Q): penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q,", "pbar = tqdm(dummy_loader) for i, _ in enumerate(pbar): state_dict['itr'] +=", "if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False,", "config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"] is None: # Download a", "epochs, although we mostly track G iterations. iters_per_epoch = 1000", "norm # Make directions orthonormal via Gram Schmidt followed a", "frozen during training: optim = torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for", "and run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config)", "simply a wrapper class to compute the OroJaR efficiently over", "tqdm import tqdm import torch import torch.nn as nn import", "config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning", "direction_size, device=device), requires_grad=True) elif config['load_A'] == 'random': print('Initializing with random", "training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_()", "direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs'] #", "train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name)", "orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False) return penalty #", "if config['fix_class'] is None: y_.sample_() y = G.module.G.shared(y_) # OroJaR", "Log metrics to TensorBoard/WandB: cur_training_iter = epoch * iters_per_epoch +", "random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], 
direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else:", "device=device, fp16=config['G_fp16']) # Prepare a fixed z & y to", "search_space != 'all', then we need to pad the z", "torch import torch.nn as nn import torch.optim import utils import", "config['search_space'] == 'all' else config['ndirs'] # A is our (ndirs,", "specified number of epochs, although we mostly track G iterations.", "from layers import fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\" This is", "A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError", "G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class'] is not", "Gram Schmidt followed a normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if not", "orojar import orojar from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb,", "range(state_dict['epoch'], config['num_epochs']): if config['pbar'] == 'mine': pbar = utils.progress(dummy_loader, displaytype='s1k'", "next training iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else", "fps=24) state_dict['epoch'] += 1 def main(): # parse command line", "sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y) norm_fn = norm # Make", "visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1) for w_ix in", "vector indicating each direction we are learning: direction_indicators = torch.eye(config['ndirs']).to(device)", "print(\"Generating initial visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,", "BigGAN repo. 
\"\"\" import os from tqdm import tqdm import", "= orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False) return penalty", "indicates the number of directions we want to learn if", "iterations. iters_per_epoch = 1000 dummy_loader = [None] * iters_per_epoch #", "= optim writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name)) sample_sheet =", "a pre-trained G if necessary download_G() config[\"G_path\"] = 'checkpoints/138k' G,", "w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize A for", "a fixed z & y to see individual sample evolution", "iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) #", "* iters_per_epoch # We don't need any real data path_size", "utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__ == '__main__':", "each direction we are learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim", "PyTorch BigGAN repo. \"\"\" import os from tqdm import tqdm", "= visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1) for w_ix", "if config['use_multiepoch_sampler'] else 'eta') else: pbar = tqdm(dummy_loader) for i,", "we want to learn if config['load_A'] == 'coords': print('Initializing with", "fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_()", "# parse command line and run parser = utils.prepare_parser() config", "download_G() config[\"G_path\"] = 'checkpoints/138k' G, state_dict, device, experiment_name = load_G(config)", "pre-trained BigGAN Generator. Modified from train.py in the PyTorch BigGAN", "train.py in the PyTorch BigGAN repo. 
\"\"\" import os from", "learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim writer =", "not config[\"no_ortho\"] else pad(A) # Log metrics to TensorBoard/WandB: cur_training_iter", "left frozen during training: optim = torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow", "if config['pbar'] == 'mine': pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler']", "'checkpoints/138k' G, state_dict, device, experiment_name = load_G(config) # If parallel,", "individual sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,", "'all' else config['ndirs'] # A is our (ndirs, |z|) matrix", "torch.utils.tensorboard import SummaryWriter from orojar import orojar from direction_utils import", "Modified from train.py in the PyTorch BigGAN repo. \"\"\" import", "data path_size = config['path_size'] # Simply stores a |z|-dimensional one-hot", "line and run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config)", "our (ndirs, |z|) matrix of directions, where ndirs indicates the", "config['parallel']: G = nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count()", "import os from tqdm import tqdm import torch import torch.nn", "directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise", "+ i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save", "the PyTorch BigGAN repo. 
\"\"\" import os from tqdm import", "!= 'all', then we need to pad the z components", "to TensorBoard/WandB: cur_training_iter = epoch * iters_per_epoch + i writer.add_scalar(f'Metrics/orojar',", "= 1000 dummy_loader = [None] * iters_per_epoch # We don't", "Q=Q, multiple_layers=False) return penalty # The main training file. Config", "y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y =", "want to learn if config['load_A'] == 'coords': print('Initializing with standard", "* iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)", "from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G from layers", "# The main training file. Config is a dictionary specifying", "G iterations. iters_per_epoch = 1000 dummy_loader = [None] * iters_per_epoch", "npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0,", "None: y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y", "1 def main(): # parse command line and run parser", "training at epoch %d...' % state_dict['epoch']) # Train for specified", "load_G, get_direction_padding_fn, init_wandb, download_G from layers import fast_gram_schmidt, norm class", "z_.sample_() if config['fix_class'] is None: y_.sample_() y = G.module.G.shared(y_) #", "cur_training_iter) # Save directions and log visuals: if not (state_dict['itr']", "Download a pre-trained G if necessary download_G() config[\"G_path\"] = 'checkpoints/138k'", "config['fix_class']) print('Beginning training at epoch %d...' 
% state_dict['epoch']) # Train", "Schmidt followed a normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"]", "G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize", "y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a", "number of epochs, although we mostly track G iterations. iters_per_epoch", "'random': print('Initializing with random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device),", "where ndirs indicates the number of directions we want to", "raise NotImplementedError # We only learn A; G is left", "utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y =", "for i, _ in enumerate(pbar): state_dict['itr'] += 1 z_.sample_() if", "config[\"no_ortho\"] else pad(A) # Log metrics to TensorBoard/WandB: cur_training_iter =", "= torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim writer = SummaryWriter('%s/%s' %", "epoch %d...' % state_dict['epoch']) # Train for specified number of", "load_G(config) # If parallel, parallelize the GD module if config['parallel']:", "leaving alone: pad = get_direction_padding_fn(config) direction_size = config['dim_z'] if config['search_space']", "= utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed", "w.r.t. 
w_sampled, NOT z: w = torch.zeros((G_batch_size, config['ndirs'])) # equal", "print('Initializing with standard basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device),", "epoch in range(state_dict['epoch'], config['num_epochs']): if config['pbar'] == 'mine': pbar =", "y to see individual sample evolution throghout training fixed_z, fixed_y", "config['dim_z'] if config['search_space'] == 'all' else config['ndirs'] # A is", "class DataParallelLoss(nn.Module): \"\"\" This is simply a wrapper class to", "if not (state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name,", "interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d'", "of Z-Space directions using a pre-trained BigGAN Generator. Modified from", "specifying the configuration # of this training run. def run(config):", "= nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using {num_gpus}", "interp_z.sample_() interp_y.sample_() if config['fix_class'] is not None: y_ = y_.new_full(y_.size(),", "is our (ndirs, |z|) matrix of directions, where ndirs indicates", "directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif config['load_A'] ==", "torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis =", "return penalty # The main training file. 
Config is a", "init_wandb, download_G from layers import fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\"", "torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim writer = SummaryWriter('%s/%s' % (config['logs_root'],", "we are learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim", "else pad(A) # Log metrics to TensorBoard/WandB: cur_training_iter = epoch", "experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y,", "== 'random': print('Initializing with random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size,", "from tqdm import tqdm import torch import torch.nn as nn", "parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__", "in range(state_dict['epoch'], config['num_epochs']): if config['pbar'] == 'mine': pbar = utils.progress(dummy_loader,", "__init__(self, G): super(DataParallelLoss, self).__init__() self.G = G def forward(self, z,", "= max(config['G_batch_size'], config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device,", "if config['parallel']: G = nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus =", "Simply stores a |z|-dimensional one-hot vector indicating each direction we", "G if necessary download_G() config[\"G_path\"] = 'checkpoints/138k' G, state_dict, device,", "G G_batch_size = max(config['G_batch_size'], config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,", "the z components that we are leaving alone: pad =", "cur_training_iter = epoch * iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter)", "else 'eta') else: pbar = tqdm(dummy_loader) for i, _ in", "for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 
cur_training_iter, fps=24)", "(state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)) if", "== 'mine': pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')", "G, state_dict, device, experiment_name = load_G(config) # If parallel, parallelize", "using a pre-trained BigGAN Generator. Modified from train.py in the", "sync_batchnorm import patch_replication_callback from torch.utils.tensorboard import SummaryWriter from orojar import", "forward(self, z, y, w, Q): penalty = orojar(self.G, z, c=y,", "if config['load_A'] == 'coords': print('Initializing with standard basis directions') A", "interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at epoch %d...' % state_dict['epoch']) #", "for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24)", "orojar from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G from", "else: pbar = tqdm(dummy_loader) for i, _ in enumerate(pbar): state_dict['itr']", "G.module.G.shared(interp_y) norm_fn = norm # Make directions orthonormal via Gram", "are leaving alone: pad = get_direction_padding_fn(config) direction_size = config['dim_z'] if", "Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize A for visualizations", "direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G from layers import", "[None] * iters_per_epoch # We don't need any real data", "path_size = config['path_size'] # Simply stores a |z|-dimensional one-hot vector", "% config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]:", "and log visuals: if not (state_dict['itr'] % config['save_every']): 
torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt'", "patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using {num_gpus} GPUs') # If search_space", "fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device,", "direction we are learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim =", "run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if", "G def forward(self, z, y, w, Q): penalty = orojar(self.G,", "dictionary specifying the configuration # of this training run. def", "Learns a matrix of Z-Space directions using a pre-trained BigGAN", "config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size,", "in the PyTorch BigGAN repo. \"\"\" import os from tqdm", "interp_vis[w_ix], 0, fps=24) for epoch in range(state_dict['epoch'], config['num_epochs']): if config['pbar']", "displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta') else: pbar = tqdm(dummy_loader) for", "class to compute the OroJaR efficiently over several GPUs \"\"\"", "Allow for different batch sizes in G G_batch_size = max(config['G_batch_size'],", "and the next training iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if not", "OroJaR taken w.r.t. w_sampled, NOT z: w = torch.zeros((G_batch_size, config['ndirs']))", "train_fns from sync_batchnorm import patch_replication_callback from torch.utils.tensorboard import SummaryWriter from", "training run. 
def run(config): if config['wandb_entity'] is not None: init_wandb(config,", "% w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] += 1 def main():", "state_dict, config, experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y) norm_fn", "different batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) z_,", "is not None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"] is", "= tqdm(dummy_loader) for i, _ in enumerate(pbar): state_dict['itr'] += 1", "# re-orthogonalize A for visualizations and the next training iteration:", "\"\"\" This is simply a wrapper class to compute the", "evolution throghout training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device,", "G_z=None, Q=Q, multiple_layers=False) return penalty # The main training file.", "direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError # We only", "import patch_replication_callback from torch.utils.tensorboard import SummaryWriter from orojar import orojar", "we are leaving alone: pad = get_direction_padding_fn(config) direction_size = config['dim_z']", "0) interp_y_ = G.module.G.shared(interp_y) norm_fn = norm # Make directions", "we mostly track G iterations. 
iters_per_epoch = 1000 dummy_loader =", "metrics to TensorBoard/WandB: cur_training_iter = epoch * iters_per_epoch + i", "z & y to see individual sample evolution throghout training", "with standard basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)", "= epoch * iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm',", "\"\"\" Learns a matrix of Z-Space directions using a pre-trained", "y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])", "a dictionary specifying the configuration # of this training run.", "with random directions') A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A)", "w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch']", "config['ndirs'] # A is our (ndirs, |z|) matrix of directions,", "config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z & y", "parse command line and run parser = utils.prepare_parser() config =", "= config['path_size'] # Simply stores a |z|-dimensional one-hot vector indicating", "experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size,", "wrapper class to compute the OroJaR efficiently over several GPUs", "a matrix of Z-Space directions using a pre-trained BigGAN Generator.", "utils import train_fns from sync_batchnorm import patch_replication_callback from torch.utils.tensorboard import", "import SummaryWriter from orojar import orojar from direction_utils import visualize_directions,", "'eta') else: pbar = tqdm(dummy_loader) for i, _ in enumerate(pbar):", "if 
config['fix_class'] is not None: y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y", "from torch.utils.tensorboard import SummaryWriter from orojar import orojar from direction_utils", "optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize A for visualizations and the", "layers import fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\" This is simply", "fixed_z.sample_() fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])", "normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) if", "None: y_.sample_() y = G.module.G.shared(y_) # OroJaR taken w.r.t. w_sampled,", "directions we want to learn if config['load_A'] == 'coords': print('Initializing", "writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions and log visuals: if", "of this training run. def run(config): if config['wandb_entity'] is not", "if necessary download_G() config[\"G_path\"] = 'checkpoints/138k' G, state_dict, device, experiment_name", "z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False) return penalty # The", "1000 dummy_loader = [None] * iters_per_epoch # We don't need", "import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G from layers import fast_gram_schmidt,", "# Download a pre-trained G if necessary download_G() config[\"G_path\"] =", "interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if", "is None: y_.sample_() y = G.module.G.shared(y_) # OroJaR taken w.r.t.", "at epoch %d...' 
% state_dict['epoch']) # Train for specified number", "iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) #", "penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step()", "import torch.nn as nn import torch.optim import utils import train_fns", "get_direction_padding_fn(config) direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs']", "the configuration # of this training run. def run(config): if", "cur_training_iter)) if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,", "download_G from layers import fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\" This", "This is simply a wrapper class to compute the OroJaR", "% state_dict['epoch']) # Train for specified number of epochs, although", "utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z", "then we need to pad the z components that we", "= utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class']", "if config['search_space'] == 'all' else config['ndirs'] # A is our", "not None: y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])", "# equal to the one-hot w penalty = G(z_, y,", "to learn if config['load_A'] == 'coords': print('Initializing with standard basis", "None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"] is None: #", "print('Beginning training at epoch %d...' 
% state_dict['epoch']) # Train for", "c=y, w=w, G_z=None, Q=Q, multiple_layers=False) return penalty # The main", "== 'coords': print('Initializing with standard basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'],", "followed a normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else", "torch.optim import utils import train_fns from sync_batchnorm import patch_replication_callback from", "need to pad the z components that we are leaving", "we need to pad the z components that we are", "the number of directions we want to learn if config['load_A']", "that we are leaving alone: pad = get_direction_padding_fn(config) direction_size =", "only learn A; G is left frozen during training: optim", "# A is our (ndirs, |z|) matrix of directions, where", "config[\"G_path\"] is None: # Download a pre-trained G if necessary", "file. Config is a dictionary specifying the configuration # of", "enumerate(pbar): state_dict['itr'] += 1 z_.sample_() if config['fix_class'] is None: y_.sample_()", "sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],", "A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif config['load_A'] == 'random':", "config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) #", "G.module.G.shared(y_) # OroJaR taken w.r.t. 
w_sampled, NOT z: w =", "nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using {num_gpus} GPUs')", "config['fix_class'] is not None: y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y =", "orthonormal via Gram Schmidt followed a normalization: Q = pad(norm_fn(fast_gram_schmidt(A)))", "a wrapper class to compute the OroJaR efficiently over several", "pad the z components that we are leaving alone: pad", "|z|-dimensional one-hot vector indicating each direction we are learning: direction_indicators", "in enumerate(pbar): state_dict['itr'] += 1 z_.sample_() if config['fix_class'] is None:", "taken w.r.t. w_sampled, NOT z: w = torch.zeros((G_batch_size, config['ndirs'])) #", "parallel, parallelize the GD module if config['parallel']: G = nn.DataParallel(DataParallelLoss(G))", "max(config['G_batch_size'], config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])", "one-hot w penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad()", "penalty # The main training file. 
Config is a dictionary", "don't need any real data path_size = config['path_size'] # Simply", "npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter,", "1)).mean() optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize A for visualizations and", "utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta') else: pbar = tqdm(dummy_loader)", "config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z,", "import torch.optim import utils import train_fns from sync_batchnorm import patch_replication_callback", "basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif config['load_A']", "G.eval() G.module.optim = optim writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name))", "If search_space != 'all', then we need to pad the", "penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions and log", "config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class'] is not None:", "elif config['load_A'] == 'random': print('Initializing with random directions') A =", "Prepare a fixed z & y to see individual sample", "i writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter) writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions", "'imagenet') if config[\"G_path\"] is None: # Download a pre-trained G", "are learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval() G.module.optim = optim writer", "directions orthonormal via Gram Schmidt followed a normalization: Q =", "the GD module if config['parallel']: G = 
nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']:", "else pad(A) if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis = visualize_directions(G.module.G,", "to compute the OroJaR efficiently over several GPUs \"\"\" def", "for specified number of epochs, although we mostly track G", "i, _ in enumerate(pbar): state_dict['itr'] += 1 z_.sample_() if config['fix_class']", "torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for different batch sizes in G", "(config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z,", "alone: pad = get_direction_padding_fn(config) direction_size = config['dim_z'] if config['search_space'] ==", "state_dict, device, experiment_name = load_G(config) # If parallel, parallelize the", "sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y, state_dict,", "w penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward()", "+= 1 def main(): # parse command line and run", "optim = torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for different batch sizes", "A is our (ndirs, |z|) matrix of directions, where ndirs", "config['pbar'] == 'mine': pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else", "multiple_layers=False) return penalty # The main training file. Config is", "from sync_batchnorm import patch_replication_callback from torch.utils.tensorboard import SummaryWriter from orojar", "BigGAN Generator. Modified from train.py in the PyTorch BigGAN repo.", "device=device), requires_grad=True) elif config['load_A'] == 'random': print('Initializing with random directions')", "pad(A) if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z,", "configuration # of this training run. 
def run(config): if config['wandb_entity']", "not None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"] is None:", "import tqdm import torch import torch.nn as nn import torch.optim", "if config[\"G_path\"] is None: # Download a pre-trained G if", "interp_y_ = G.module.G.shared(interp_y) norm_fn = norm # Make directions orthonormal", "The main training file. Config is a dictionary specifying the", "= get_direction_padding_fn(config) direction_size = config['dim_z'] if config['search_space'] == 'all' else", "If parallel, parallelize the GD module if config['parallel']: G =", "sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) z_, y_ =", "a |z|-dimensional one-hot vector indicating each direction we are learning:", "def forward(self, z, y, w, Q): penalty = orojar(self.G, z,", "torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError # We only learn A; G", "% (config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_,", "training iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A)", "G_batch_size = max(config['G_batch_size'], config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],", "visuals: if not (state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'],", "if not config[\"no_ortho\"] else pad(A) # Log metrics to TensorBoard/WandB:", "NotImplementedError # We only learn A; G is left frozen", "% (config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G, interp_z,", "G = nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using", "number of directions we want to learn if config['load_A'] ==", "G.module.G.dim_z, 
config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"],", "writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] += 1 def", "equal to the one-hot w penalty = G(z_, y, w=w,", "torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif config['load_A'] == 'random': print('Initializing with", "tqdm(dummy_loader) for i, _ in enumerate(pbar): state_dict['itr'] += 1 z_.sample_()", "init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"] is None: # Download", "learn if config['load_A'] == 'coords': print('Initializing with standard basis directions')", "config[\"G_path\"] = 'checkpoints/138k' G, state_dict, device, experiment_name = load_G(config) #", "We only learn A; G is left frozen during training:", "config['load_A'] == 'coords': print('Initializing with standard basis directions') A =", "fp16=config['G_fp16']) # Prepare a fixed z & y to see", "= G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step() #", "although we mostly track G iterations. 
iters_per_epoch = 1000 dummy_loader", "nn import torch.optim import utils import train_fns from sync_batchnorm import", "config['wandb_entity'] is not None: init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet') if config[\"G_path\"]", "state_dict['epoch']) # Train for specified number of epochs, although we", "utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class'] is", "num_gpus = torch.cuda.device_count() print(f'Using {num_gpus} GPUs') # If search_space !=", "# Allow for different batch sizes in G G_batch_size =", "interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at epoch %d...' %", "config['use_multiepoch_sampler'] else 'eta') else: pbar = tqdm(dummy_loader) for i, _", "Make directions orthonormal via Gram Schmidt followed a normalization: Q", "GD module if config['parallel']: G = nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G)", "DataParallelLoss(nn.Module): \"\"\" This is simply a wrapper class to compute", "= fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at", "= config['dim_z'] if config['search_space'] == 'all' else config['ndirs'] # A", "fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class'] is not None: y_ =", "config, experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_ = G.module.G.shared(interp_y) norm_fn =", "%d...' % state_dict['epoch']) # Train for specified number of epochs,", "y_.sample_() y = G.module.G.shared(y_) # OroJaR taken w.r.t. 
w_sampled, NOT", "interp_y.sample_() if config['fix_class'] is not None: y_ = y_.new_full(y_.size(), config['fix_class'])", "one-hot vector indicating each direction we are learning: direction_indicators =", "0, fps=24) for epoch in range(state_dict['epoch'], config['num_epochs']): if config['pbar'] ==", "# If parallel, parallelize the GD module if config['parallel']: G", "range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24) for epoch in", "fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) interp_z.sample_()", "the one-hot w penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean()", "not config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis", "real data path_size = config['path_size'] # Simply stores a |z|-dimensional", "tqdm import torch import torch.nn as nn import torch.optim import", "import fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\" This is simply a", "range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] += 1", "components that we are leaving alone: pad = get_direction_padding_fn(config) direction_size", "iters_per_epoch = 1000 dummy_loader = [None] * iters_per_epoch # We", "self.G = G def forward(self, z, y, w, Q): penalty", "= G def forward(self, z, y, w, Q): penalty =", "is not None: y_ = y_.new_full(y_.size(), config['fix_class']) fixed_y = fixed_y.new_full(fixed_y.size(),", "config['ndirs'])) # equal to the one-hot w penalty = G(z_,", "is left frozen during training: optim = torch.optim.Adam(params=[A], lr=config['A_lr']) #", "in G G_batch_size = max(config['G_batch_size'], config['batch_size']) z_, y_ = utils.prepare_z_y(G_batch_size,", "efficiently over several GPUs \"\"\" def __init__(self, G): super(DataParallelLoss, self).__init__()", 
"config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis", "dummy_loader = [None] * iters_per_epoch # We don't need any", "else: raise NotImplementedError # We only learn A; G is", "# Train for specified number of epochs, although we mostly", "optim.step() # re-orthogonalize A for visualizations and the next training", "in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24) for epoch", "% w_ix, interp_vis[w_ix], 0, fps=24) for epoch in range(state_dict['epoch'], config['num_epochs']):", "y_, fixed_z, fixed_y, state_dict, config, experiment_name) writer.add_image('samples', sample_sheet, 0) interp_y_", "device=device, fp16=config['G_fp16']) interp_z.sample_() interp_y.sample_() if config['fix_class'] is not None: y_", "necessary download_G() config[\"G_path\"] = 'checkpoints/138k' G, state_dict, device, experiment_name =", "is simply a wrapper class to compute the OroJaR efficiently", "during training: optim = torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for different", "|z|) matrix of directions, where ndirs indicates the number of", "indicating each direction we are learning: direction_indicators = torch.eye(config['ndirs']).to(device) G.eval()", "import orojar from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G", "requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError # We only learn A;", "OroJaR efficiently over several GPUs \"\"\" def __init__(self, G): super(DataParallelLoss,", "config['cross_replica']: patch_replication_callback(G) num_gpus = torch.cuda.device_count() print(f'Using {num_gpus} GPUs') # If", "via Gram Schmidt followed a normalization: Q = pad(norm_fn(fast_gram_schmidt(A))) if", "NOT z: w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the", "config[\"vis_during_training\"]: 
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1)", "Z-Space directions using a pre-trained BigGAN Generator. Modified from train.py", "config = vars(parser.parse_args()) print(config) run(config) if __name__ == '__main__': main()", "penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False) return", "track G iterations. iters_per_epoch = 1000 dummy_loader = [None] *", "SummaryWriter('%s/%s' % (config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_,", "visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G from layers import fast_gram_schmidt, norm", "run. def run(config): if config['wandb_entity'] is not None: init_wandb(config, config['experiment_name'],", "config['num_epochs']): if config['pbar'] == 'mine': pbar = utils.progress(dummy_loader, displaytype='s1k' if", "training file. Config is a dictionary specifying the configuration #", "directions, where ndirs indicates the number of directions we want", "== 'all' else config['ndirs'] # A is our (ndirs, |z|)", "= utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y", "for visualizations and the next training iteration: Q = pad(norm_fn(fast_gram_schmidt(A)))", "config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis =", "# Save directions and log visuals: if not (state_dict['itr'] %", "experiment_name = load_G(config) # If parallel, parallelize the GD module", "learn A; G is left frozen during training: optim =", "stores a |z|-dimensional one-hot vector indicating each direction we are", "w=w, G_z=None, Q=Q, multiple_layers=False) return penalty # The main training", "{num_gpus} GPUs') # If search_space != 'all', then we need", "writer = SummaryWriter('%s/%s' % 
(config['logs_root'], experiment_name)) sample_sheet = train_fns.save_and_sample(G.module.G, None,", "'%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G,", "z components that we are leaving alone: pad = get_direction_padding_fn(config)", "the OroJaR efficiently over several GPUs \"\"\" def __init__(self, G):", "# Simply stores a |z|-dimensional one-hot vector indicating each direction", "batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) z_, y_", "(ndirs, |z|) matrix of directions, where ndirs indicates the number", "= pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) # Log metrics", "device=device), requires_grad=True) torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError # We only learn", "# OroJaR taken w.r.t. w_sampled, NOT z: w = torch.zeros((G_batch_size,", "= 'checkpoints/138k' G, state_dict, device, experiment_name = load_G(config) # If", "throghout training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])", "config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at epoch %d...'", "pad = get_direction_padding_fn(config) direction_size = config['dim_z'] if config['search_space'] == 'all'", "torch.cuda.device_count() print(f'Using {num_gpus} GPUs') # If search_space != 'all', then", "TensorBoard/WandB: cur_training_iter = epoch * iters_per_epoch + i writer.add_scalar(f'Metrics/orojar', penalty.item(),", "= train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y, state_dict, config,", "= [None] * iters_per_epoch # We don't need any real", "+= 1 z_.sample_() if config['fix_class'] is None: y_.sample_() y =", "= torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True) 
torch.nn.init.kaiming_normal_(A) else: raise NotImplementedError #", "over several GPUs \"\"\" def __init__(self, G): super(DataParallelLoss, self).__init__() self.G", "of epochs, although we mostly track G iterations. iters_per_epoch =", "parallelize the GD module if config['parallel']: G = nn.DataParallel(DataParallelLoss(G)) if", "= G.module.G.shared(y_) # OroJaR taken w.r.t. w_sampled, NOT z: w", "config['wandb_entity'], 'imagenet') if config[\"G_path\"] is None: # Download a pre-trained", "path_sizes=path_size, Q=Q, high_quality=False, npv=1) for w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' %", "SummaryWriter from orojar import orojar from direction_utils import visualize_directions, load_G,", "training: optim = torch.optim.Adam(params=[A], lr=config['A_lr']) # Allow for different batch", "GPUs \"\"\" def __init__(self, G): super(DataParallelLoss, self).__init__() self.G = G", "re-orthogonalize A for visualizations and the next training iteration: Q", "to the one-hot w penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus,", "visualizations and the next training iteration: Q = pad(norm_fn(fast_gram_schmidt(A))) if", "matrix of Z-Space directions using a pre-trained BigGAN Generator. 
Modified", "G): super(DataParallelLoss, self).__init__() self.G = G def forward(self, z, y,", "not (state_dict['itr'] % config['save_every']): torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' % (config['weights_root'], experiment_name, cur_training_iter))", "os from tqdm import tqdm import torch import torch.nn as", "'coords': print('Initializing with standard basis directions') A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size,", "z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare", "= torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True) elif config['load_A'] == 'random': print('Initializing", "We don't need any real data path_size = config['path_size'] #", "state_dict['epoch'] += 1 def main(): # parse command line and", "# We don't need any real data path_size = config['path_size']", "device, experiment_name = load_G(config) # If parallel, parallelize the GD", "A.pow(2).mean().pow(0.5).item(), cur_training_iter) # Save directions and log visuals: if not", "Train for specified number of epochs, although we mostly track", "several GPUs \"\"\" def __init__(self, G): super(DataParallelLoss, self).__init__() self.G =", "z, y, w, Q): penalty = orojar(self.G, z, c=y, w=w,", "module if config['parallel']: G = nn.DataParallel(DataParallelLoss(G)) if config['cross_replica']: patch_replication_callback(G) num_gpus", "device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() interp_z, interp_y = utils.prepare_z_y(config[\"n_samples\"], G.module.G.dim_z, config['n_classes'],", "= load_G(config) # If parallel, parallelize the GD module if", "a pre-trained BigGAN Generator. 
Modified from train.py in the PyTorch", "None, G.module.G, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name) writer.add_image('samples',", "def __init__(self, G): super(DataParallelLoss, self).__init__() self.G = G def forward(self,", "lr=config['A_lr']) # Allow for different batch sizes in G G_batch_size", "A; G is left frozen during training: optim = torch.optim.Adam(params=[A],", "norm class DataParallelLoss(nn.Module): \"\"\" This is simply a wrapper class", "super(DataParallelLoss, self).__init__() self.G = G def forward(self, z, y, w,", "_ in enumerate(pbar): state_dict['itr'] += 1 z_.sample_() if config['fix_class'] is", "pad(norm_fn(fast_gram_schmidt(A))) if not config[\"no_ortho\"] else pad(A) if config[\"vis_during_training\"]: print(\"Generating initial", "print(f'Using {num_gpus} GPUs') # If search_space != 'all', then we", "command line and run parser = utils.prepare_parser() config = vars(parser.parse_args())", "fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class']) interp_y = interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training", "w_ix in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24) for", "y, w=w, Q=Q.repeat(num_gpus, 1)).mean() optim.zero_grad() penalty.backward() optim.step() # re-orthogonalize A", "G.module.optim = optim writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name)) sample_sheet", "y, w, Q): penalty = orojar(self.G, z, c=y, w=w, G_z=None,", "& y to see individual sample evolution throghout training fixed_z,", "see individual sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y(G_batch_size,", "def main(): # parse command line and run parser =", "= norm # Make directions orthonormal via Gram Schmidt followed", "need any real data path_size = config['path_size'] # Simply stores", "fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) 
fixed_z.sample_() fixed_y.sample_() interp_z,", "if config[\"vis_during_training\"]: print(\"Generating initial visualizations...\") interp_vis = visualize_directions(G.module.G, interp_z, interp_y_,", "patch_replication_callback from torch.utils.tensorboard import SummaryWriter from orojar import orojar from", "compute the OroJaR efficiently over several GPUs \"\"\" def __init__(self,", "in range(config['ndirs']): writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24) state_dict['epoch'] +=", "\"\"\" import os from tqdm import tqdm import torch import", "iters_per_epoch # We don't need any real data path_size =", "any real data path_size = config['path_size'] # Simply stores a", "= interp_y.new_full(interp_y.size(), config['fix_class']) print('Beginning training at epoch %d...' % state_dict['epoch'])", "= torch.cuda.device_count() print(f'Using {num_gpus} GPUs') # If search_space != 'all',", "fast_gram_schmidt, norm class DataParallelLoss(nn.Module): \"\"\" This is simply a wrapper", "requires_grad=True) elif config['load_A'] == 'random': print('Initializing with random directions') A", "from orojar import orojar from direction_utils import visualize_directions, load_G, get_direction_padding_fn,", "(config['weights_root'], experiment_name, cur_training_iter)) if config[\"vis_during_training\"]: interp_vis = visualize_directions(G.module.G, interp_z, interp_y_,", "pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta') else: pbar", "interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q, high_quality=False, npv=1) for" ]
[ "[a, b, c, d, e, f, g, h]) sqlformula14 =", "sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore')", "japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c, d,", "singapore VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c, d, e, f, g, h])", "= \"INSERT INTO philipines VALUES(%s, %s, %s, %s, %s, %s,", "h]) sqlformula3 = \"INSERT INTO cambodia VALUES(%s, %s, %s, %s,", "malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c, d, e,", "g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'],", "c, d, e, f, g, h]) sqlformula6 = \"INSERT INTO", "vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c, d, e, f,", "brunei VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "h]) sqlformula4 = \"INSERT INTO china VALUES(%s, %s, %s, %s,", "sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') 
myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "d, e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'],", "g, h]) sqlformula13 = \"INSERT INTO thailand VALUES(%s, %s, %s,", "as pd import numpy as np import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia')", "c, d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'],", "philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c, d, e, f, g,", "china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a, b, c, d, e, f,", "b, c, d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'],", "h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']):", "= \"\", database = \"\" ) mycursor = mydb.cursor() sqlformula1", "import xlsxwriter import pandas as pd import numpy as np", "vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c, d, e, f, g,", "japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b,", "mycursor.execute(sqlformula13, [a, b, c, d, e, f, g, h]) sqlformula14", "g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'],", "d, e, f, g, h]) sqlformula2 = \"INSERT INTO brunei", "c, d, e, f, g, h]) sqlformula11 = \"INSERT INTO", "c, d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'],", "h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], 
vietnam['Account_Balance']):", "cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b,", "h]) sqlformula7 = \"INSERT INTO lao VALUES(%s, %s, %s, %s,", "malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "d, e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'],", "zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a,", "%s, %s)\" for a, b, c, d, e, f, g,", "INTO brunei VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'],", "h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']):", "singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c, d, e, f, g, h])", "japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c,", "new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c, d, e, f,", "g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], 
philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'],", "singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c, d,", "indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c, d,", "e, f, g, h]) sqlformula13 = \"INSERT INTO thailand VALUES(%s,", "f, g, h]) sqlformula13 = \"INSERT INTO thailand VALUES(%s, %s,", "c, d, e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'],", "INTO cambodia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c,", "a, b, c, d, e, f, g, h in zip(philipines['Year'],", "singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c, d, e, f, g,", "g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'],", "b, c, d, e, f, g, h]) sqlformula2 = \"INSERT", "= \"INSERT INTO lao VALUES(%s, %s, %s, %s, %s, %s,", "a, b, c, d, e, f, g, h in zip(japan['Year'],", "f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'],", "f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'],", "INTO singapore VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "h]) sqlformula10 = \"INSERT INTO new_zeland VALUES(%s, %s, %s, %s,", "h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']):", "zip(australia['Year'], australia['RGDP'], australia['NGDP'], 
australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a,", "passwd = \"\", database = \"\" ) mycursor = mydb.cursor()", "[a, b, c, d, e, f, g, h]) sqlformula13 =", "brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan')", "sqlformula6 = \"INSERT INTO japan VALUES(%s, %s, %s, %s, %s,", "mycursor.execute(sqlformula9, [a, b, c, d, e, f, g, h]) sqlformula10", "c, d, e, f, g, h]) sqlformula13 = \"INSERT INTO", "in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6,", "a, b, c, d, e, f, g, h in zip(malaysia['Year'],", "g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'],", "zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a,", "g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'],", "c, d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'],", "f, g, h]) sqlformula6 = \"INSERT INTO japan VALUES(%s, %s,", "china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], 
china['Account_Balance']): mycursor.execute(sqlformula4, [a, b,", "f, g, h]) sqlformula7 = \"INSERT INTO lao VALUES(%s, %s,", "vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c,", "e, f, g, h]) sqlformula14 = \"INSERT INTO vietnam VALUES(%s,", ") mycursor = mydb.cursor() sqlformula1 = \"INSERT INTO australia VALUES(%s,", "e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'],", "= \"\" ) mycursor = mydb.cursor() sqlformula1 = \"INSERT INTO", "g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'],", "indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c, d, e,", "in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8,", "f, g, h]) sqlformula10 = \"INSERT INTO new_zeland VALUES(%s, %s,", "d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'],", "d, e, f, g, h]) sqlformula5 = \"INSERT INTO indonesia", "INTO malaysia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c, d, e,", "new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c, d, e, f, g,", "%s, %s, %s, %s, %s, %s, %s)\" for a, b,", "g, h]) sqlformula3 = \"INSERT INTO cambodia VALUES(%s, %s, %s,", "\"INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s, %s, %s,", "\"INSERT INTO singapore VALUES(%s, %s, %s, %s, %s, %s, %s,", "f, g, h]) sqlformula12 = \"INSERT INTO singapore 
VALUES(%s, %s,", "h]) sqlformula14 = \"INSERT INTO vietnam VALUES(%s, %s, %s, %s,", "e, f, g, h]) sqlformula5 = \"INSERT INTO indonesia VALUES(%s,", "= \"INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s, %s,", "INTO lao VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "mycursor.execute(sqlformula8, [a, b, c, d, e, f, g, h]) sqlformula9", "= \"INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s, %s,", "INTO vietnam VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b,", "b, c, d, e, f, g, h]) sqlformula11 = \"INSERT", "australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c, d, e, f,", "h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']):", "sqlformula5 = \"INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s,", "sqlformula12 = \"INSERT INTO singapore VALUES(%s, %s, %s, %s, %s,", "= mysql.connector.connect( host = \"localhost\", user = \"root\", passwd =", "d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'],", "new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for a,", "e, f, g, h]) sqlformula2 = \"INSERT INTO brunei VALUES(%s,", "f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'],", "vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c, d, e,", "g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'],", "in zip(indonesia['Year'], indonesia['RGDP'], 
indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5,", "sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines')", "myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c, d,", "sqlformula9 = \"INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s,", "sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand')", "mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') 
brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "h]) sqlformula5 = \"INSERT INTO indonesia VALUES(%s, %s, %s, %s,", "indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c, d, e, f, g,", "g, h]) sqlformula10 = \"INSERT INTO new_zeland VALUES(%s, %s, %s,", "= \"INSERT INTO thailand VALUES(%s, %s, %s, %s, %s, %s,", "a, b, c, d, e, f, g, h in zip(cambodia['Year'],", "h]) sqlformula8 = \"INSERT INTO malaysia VALUES(%s, %s, %s, %s,", "lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c, d, e, f,", "myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], 
myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c,", "[a, b, c, d, e, f, g, h]) sqlformula3 =", "d, e, f, g, h]) sqlformula11 = \"INSERT INTO philipines", "b, c, d, e, f, g, h]) sqlformula8 = \"INSERT", "= \"INSERT INTO brunei VALUES(%s, %s, %s, %s, %s, %s,", "h]) sqlformula12 = \"INSERT INTO singapore VALUES(%s, %s, %s, %s,", "b, c, d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'],", "singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c, d, e,", "a, b, c, d, e, f, g, h in zip(new_zeland['Year'],", "[a, b, c, d, e, f, g, h]) sqlformula9 =", "malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c, d, e, f, g, h])", "vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b,", "f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'],", "e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'],", "g, h]) sqlformula7 = \"INSERT INTO lao VALUES(%s, %s, %s,", "myanmar VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b,", "b, c, d, e, f, g, h]) sqlformula7 = \"INSERT", "philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c, d, e, f,", "g, h]) sqlformula12 = \"INSERT INTO singapore VALUES(%s, %s, %s,", "sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') 
indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']):", "thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c, d, e, f,", "sqlformula14 = \"INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s,", "lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c,", "b, c, d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'],", "b, c, d, e, f, g, h]) sqlformula3 = \"INSERT", "d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'],", "sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam')", "zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a,", "in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], 
singapore['Account_Balance']): mycursor.execute(sqlformula12,", "host = \"localhost\", user = \"root\", passwd = \"\", database", "in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14,", "new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c,", "indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c, d, e, f, g, h])", "h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']):", "f, g, h]) sqlformula3 = \"INSERT INTO cambodia VALUES(%s, %s,", "database = \"\" ) mycursor = mydb.cursor() sqlformula1 = \"INSERT", "b, c, d, e, f, g, h]) sqlformula5 = \"INSERT", "h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']):", "mycursor.execute(sqlformula10, [a, b, c, d, e, f, g, h]) sqlformula11", "philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c, d, e,", "as np import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia')", "\"INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s, %s, %s,", "e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'],", "import numpy as np import mysql.connector 
australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei')", "b, c, d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'],", "= \"INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s,", "b, c, d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'],", "\"INSERT INTO lao VALUES(%s, %s, %s, %s, %s, %s, %s,", "f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'],", "f, g, h]) sqlformula8 = \"INSERT INTO malaysia VALUES(%s, %s,", "\"INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s, %s, %s,", "sqlformula4 = \"INSERT INTO china VALUES(%s, %s, %s, %s, %s,", "cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao')", "\"INSERT INTO japan VALUES(%s, %s, %s, %s, %s, %s, %s,", "e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'],", "b, c, d, e, f, g, h]) sqlformula14 = \"INSERT", "thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb = mysql.connector.connect( host =", "g, h]) sqlformula6 = \"INSERT INTO japan VALUES(%s, %s, %s,", "np import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') 
brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "c, d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'],", "myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c, d, e, f, g,", "a, b, c, d, e, f, g, h in zip(australia['Year'],", "h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']):", "numpy as np import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "g, h]) sqlformula5 = \"INSERT INTO indonesia VALUES(%s, %s, %s,", "b, c, d, e, f, g, h]) sqlformula12 = \"INSERT", "a, b, c, d, e, f, g, h in zip(brunei['Year'],", "thailand VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "[a, b, c, d, e, f, g, h]) sqlformula7 =", "h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']):", "philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb", "pd import numpy as np import mysql.connector 
australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "b, c, d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'],", "malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c, d,", "cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c,", "[a, b, c, d, e, f, g, h]) sqlformula2 =", "INTO thailand VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'],", "d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'],", "new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c, d, e,", "d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'],", "h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']):", "malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c, d, e, f, g,", "new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b,", "thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c, d,", "china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a, 
b, c, d,", "f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'],", "f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'],", "h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']):", "= \"root\", passwd = \"\", database = \"\" ) mycursor", "[a, b, c, d, e, f, g, h]) sqlformula6 =", "g, h]) sqlformula11 = \"INSERT INTO philipines VALUES(%s, %s, %s,", "myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b,", "indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c,", "sqlformula7 = \"INSERT INTO lao VALUES(%s, %s, %s, %s, %s,", "c, d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'],", "= \"INSERT INTO australia VALUES(%s, %s, %s, %s, %s, %s,", "[a, b, c, d, e, f, g, h]) sqlformula11 =", "in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10,", "b, c, d, e, f, g, h]) sqlformula10 = \"INSERT", "e, f, g, h]) sqlformula4 = \"INSERT INTO china VALUES(%s,", "c, d, e, f, g, h]) sqlformula5 = \"INSERT INTO", "e, f, g, h]) sqlformula8 = \"INSERT INTO malaysia VALUES(%s,", "e, f, g, h]) sqlformula9 = \"INSERT INTO myanmar VALUES(%s,", "mycursor.execute(sqlformula11, [a, b, c, d, e, f, g, h]) sqlformula12", "sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') 
malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland')", "brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c, d, e, f,", "c, d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'],", "d, e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'],", "[a, b, c, d, e, f, g, h]) sqlformula5 =", "c, d, e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'],", "philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c, d,", "b, c, d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'],", "lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c, d, e, f, g, h])", "cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c, d, e,", "zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a,", "c, d, e, f, g, h]) sqlformula2 = \"INSERT INTO", "japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c, d, e, f,", "= \"localhost\", user = \"root\", passwd = \"\", database =", "zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a,", "china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a, b, c, d, e,", "philipines VALUES(%s, %s, 
%s, %s, %s, %s, %s, %s)\" for", "\"INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s, %s, %s,", "china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a, b, c,", "b, c, d, e, f, g, h]) sqlformula13 = \"INSERT", "cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c, d, e, f, g, h])", "mycursor.execute(sqlformula3, [a, b, c, d, e, f, g, h]) sqlformula4", "in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9,", "malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c, d, e, f,", "malaysia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "h]) sqlformula6 = \"INSERT INTO japan VALUES(%s, %s, %s, %s,", "%s, %s, %s, %s)\" for a, b, c, d, e,", "a, b, c, d, e, f, g, h in zip(vietnam['Year'],", "mycursor.execute(sqlformula6, [a, b, c, d, e, f, g, h]) sqlformula7", "c, d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'],", "INTO australia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c, d, e,", "japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c, d, e, f, g, h])", "japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c, d, e, f, g,", "mysql.connector.connect( host = \"localhost\", user = \"root\", passwd = \"\",", "cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c, d,", "d, e, f, g, h]) sqlformula7 = \"INSERT INTO lao", "e, f, g, h]) sqlformula6 = \"INSERT INTO japan VALUES(%s,", "a, b, c, d, e, f, g, h in zip(myanmar['Year'],", "%s)\" for a, b, c, 
d, e, f, g, h", "c, d, e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'],", "f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'],", "g, h]) sqlformula4 = \"INSERT INTO china VALUES(%s, %s, %s,", "vietnam VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c,", "g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'],", "indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b, c, d, e, f,", "h]) sqlformula11 = \"INSERT INTO philipines VALUES(%s, %s, %s, %s,", "d, e, f, g, h]) sqlformula12 = \"INSERT INTO singapore", "myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a,", "\"\", database = \"\" ) mycursor = mydb.cursor() sqlformula1 =", "australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') 
cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia')", "d, e, f, g, h]) sqlformula10 = \"INSERT INTO new_zeland", "zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a,", "malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']): mycursor.execute(sqlformula8, [a, b, c,", "japan VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c,", "brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c, d, e, f, g,", "\"INSERT INTO china VALUES(%s, %s, %s, %s, %s, %s, %s,", "f, g, h]) sqlformula11 = \"INSERT INTO philipines VALUES(%s, %s,", "d, e, f, g, h]) sqlformula9 = \"INSERT INTO myanmar", "c, d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'],", "d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'],", "e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'],", "e, f, g, h]) sqlformula11 = \"INSERT INTO philipines VALUES(%s,", "d, e, f, g, h]) sqlformula3 = \"INSERT INTO cambodia", "f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'],", "e, f, g, h]) sqlformula10 = \"INSERT INTO new_zeland VALUES(%s,", "e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], 
brunei['GDP_pc'], brunei['Inflation'],", "d, e, f, g, h]) sqlformula13 = \"INSERT INTO thailand", "australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c, d, e, f, g,", "mycursor.execute(sqlformula4, [a, b, c, d, e, f, g, h]) sqlformula5", "zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a,", "\"INSERT INTO philipines VALUES(%s, %s, %s, %s, %s, %s, %s,", "INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "c, d, e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'],", "indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a, b,", "[a, b, c, d, e, f, g, h]) sqlformula12 =", "e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'],", "\"\" ) mycursor = mydb.cursor() sqlformula1 = \"INSERT INTO australia", "\"INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s,", "g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'],", "australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c, d,", "= \"INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s, %s,", "[a, b, c, d, e, f, g, h]) sqlformula4 =", "new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c, d, e, f, g, h])", "%s, %s, %s)\" for a, b, c, d, e, f,", "thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c,", "\"INSERT INTO australia VALUES(%s, %s, %s, %s, %s, %s, 
%s,", "brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b,", "lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b,", "a, b, c, d, e, f, g, h in zip(indonesia['Year'],", "g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'],", "lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c, d,", "= \"INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s, %s,", "thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c, d, e, f, g,", "cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c, d, e, f, g,", "cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3, [a, b, c, d, e, f,", "china VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "philipines['Account_Balance']): mycursor.execute(sqlformula11, [a, b, c, d, e, f, g, h])", "h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']):", "= \"INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s, %s,", "australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b,", "sqlformula1 = \"INSERT INTO australia VALUES(%s, %s, %s, %s, %s,", "myanmar['Account_Balance']): mycursor.execute(sqlformula9, [a, b, c, d, e, f, g, h])", "g, h]) sqlformula9 = \"INSERT INTO myanmar VALUES(%s, %s, %s,", "c, d, e, f, g, h]) sqlformula8 = \"INSERT INTO", "sqlformula2 = \"INSERT INTO brunei 
VALUES(%s, %s, %s, %s, %s,", "h]) sqlformula9 = \"INSERT INTO myanmar VALUES(%s, %s, %s, %s,", "= \"INSERT INTO china VALUES(%s, %s, %s, %s, %s, %s,", "f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'],", "%s, %s, %s, %s, %s, %s)\" for a, b, c,", "mycursor.execute(sqlformula2, [a, b, c, d, e, f, g, h]) sqlformula3", "f, g, h]) sqlformula9 = \"INSERT INTO myanmar VALUES(%s, %s,", "d, e, f, g, h]) sqlformula6 = \"INSERT INTO japan", "f, g, h]) sqlformula4 = \"INSERT INTO china VALUES(%s, %s,", "e, f, g, h]) sqlformula12 = \"INSERT INTO singapore VALUES(%s,", "b, c, d, e, f, g, h]) sqlformula6 = \"INSERT", "vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb = mysql.connector.connect( host = \"localhost\", user", "in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4,", "b, c, d, e, f, g, h]) sqlformula4 = \"INSERT", "f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'],", "sqlformula3 = \"INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s,", "a, b, c, d, e, f, g, h in zip(singapore['Year'],", "china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China') indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia')", "f, g, h]) sqlformula14 = \"INSERT INTO vietnam VALUES(%s, %s,", "zip(brunei['Year'], brunei['RGDP'], 
brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a,", "e, f, g, h]) sqlformula3 = \"INSERT INTO cambodia VALUES(%s,", "f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'],", "\"root\", passwd = \"\", database = \"\" ) mycursor =", "australia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "g, h]) sqlformula8 = \"INSERT INTO malaysia VALUES(%s, %s, %s,", "\"INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s, %s, %s,", "\"INSERT INTO brunei VALUES(%s, %s, %s, %s, %s, %s, %s,", "Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') '''", "f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'],", "vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c, d, e, f, g, h])", "d, e, f, g, h]) sqlformula14 = \"INSERT INTO vietnam", "e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'],", "lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c, d, e,", "singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb = 
mysql.connector.connect(", "japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New", "%s, %s, %s, %s, %s)\" for a, b, c, d,", "b, c, d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'],", "china['Account_Balance']): mycursor.execute(sqlformula4, [a, b, c, d, e, f, g, h])", "c, d, e, f, g, h]) sqlformula12 = \"INSERT INTO", "thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c, d, e,", "sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb = mysql.connector.connect( host", "zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a,", "d, e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'],", "a, b, c, d, e, f, g, h in zip(thailand['Year'],", "thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b,", "b, c, d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'],", "g, h]) sqlformula2 = \"INSERT INTO brunei VALUES(%s, %s, %s,", "sqlformula11 = \"INSERT INTO philipines VALUES(%s, %s, %s, %s, %s,", "b, c, d, e, f, g, h in zip(japan['Year'], 
japan['RGDP'],", "mycursor.execute(sqlformula7, [a, b, c, d, e, f, g, h]) sqlformula8", "e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'],", "brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c, d, e,", "b, c, d, e, f, g, h]) sqlformula9 = \"INSERT", "g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'],", "new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='New Zeland') philipines=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'],", "INTO philipines VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "australia['Account_Balance']): mycursor.execute(sqlformula1, [a, b, c, d, e, f, g, h])", "f, g, h]) sqlformula5 = \"INSERT INTO indonesia VALUES(%s, %s,", "brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c, d,", "lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c, d, e, f, g,", "zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a,", "for a, b, c, d, e, f, g, h in", "mycursor = mydb.cursor() sqlformula1 = \"INSERT INTO australia VALUES(%s, %s,", "c, d, e, f, g, h]) sqlformula14 = \"INSERT 
INTO", "sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb = mysql.connector.connect( host = \"localhost\",", "brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2, [a, b, c,", "zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']): mycursor.execute(sqlformula5, [a,", "[a, b, c, d, e, f, g, h]) sqlformula10 =", "import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Australia') brunei=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Brunei') cambodia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Cambodia') china=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='China')", "d, e, f, g, h]) sqlformula4 = \"INSERT INTO china", "import pandas as pd import numpy as np import mysql.connector", "xlsxwriter import pandas as pd import numpy as np import", "sheet_name='Vietnam') ''' mydb = mysql.connector.connect( host = \"localhost\", user =", "mycursor.execute(sqlformula1, [a, b, c, d, e, f, g, h]) sqlformula2", "sqlformula10 = \"INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s,", "''' mydb = mysql.connector.connect( host = \"localhost\", user = \"root\",", "sqlformula13 = \"INSERT INTO thailand VALUES(%s, %s, %s, %s, %s,", "d, e, f, g, h]) sqlformula8 = \"INSERT INTO malaysia", "in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']): mycursor.execute(sqlformula2,", "mycursor.execute(sqlformula12, [a, b, c, d, e, f, g, h]) sqlformula13", "mydb = mysql.connector.connect( host = 
\"localhost\", user = \"root\", passwd", "= \"INSERT INTO singapore VALUES(%s, %s, %s, %s, %s, %s,", "c, d, e, f, g, h]) sqlformula4 = \"INSERT INTO", "g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'],", "INTO japan VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']): mycursor.execute(sqlformula11,", "\"INSERT INTO thailand VALUES(%s, %s, %s, %s, %s, %s, %s,", "e, f, g, h]) sqlformula7 = \"INSERT INTO lao VALUES(%s,", "e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'],", "b, c, d, e, f, g, h in zip(china['Year'], china['RGDP'],", "e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'],", "a, b, c, d, e, f, g, h in zip(china['Year'],", "b, c, d, e, f, g, h in zip(australia['Year'], australia['RGDP'],", "a, b, c, d, e, f, g, h in zip(lao['Year'],", "[a, b, c, d, e, f, g, h]) sqlformula8 =", "zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a,", "mycursor.execute(sqlformula14, [a, b, c, d, e, f, g, h]) '''", "f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'],", "f, g, h]) sqlformula2 = \"INSERT INTO brunei VALUES(%s, %s,", "c, d, e, f, g, h]) sqlformula9 = \"INSERT INTO", "singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b, c, d, e, f,", "singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], 
singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']): mycursor.execute(sqlformula12, [a, b,", "INTO china VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']): mycursor.execute(sqlformula13,", "g, h]) sqlformula14 = \"INSERT INTO vietnam VALUES(%s, %s, %s,", "d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'],", "h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']):", "in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7,", "e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'],", "in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']): mycursor.execute(sqlformula3,", "INTO myanmar VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'],", "in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']): mycursor.execute(sqlformula1,", "h]) sqlformula13 = \"INSERT INTO thailand VALUES(%s, %s, %s, %s,", "lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']): mycursor.execute(sqlformula7, [a, b, c, d, e, f,", "= mydb.cursor() sqlformula1 = \"INSERT INTO australia VALUES(%s, %s, %s,", "new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], 
new_zeland['Net_LB'], new_zeland['Account_Balance']): mycursor.execute(sqlformula10, [a, b, c, d,", "g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'],", "mycursor.execute(sqlformula5, [a, b, c, d, e, f, g, h]) sqlformula6", "INTO indonesia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\"", "e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'],", "user = \"root\", passwd = \"\", database = \"\" )", "sheet_name='Philipines') singapore=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Singapore') thailand=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Thailand') vietnam=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Vietnam') ''' mydb =", "vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']): mycursor.execute(sqlformula14, [a, b, c, d,", "mydb.cursor() sqlformula1 = \"INSERT INTO australia VALUES(%s, %s, %s, %s,", "cambodia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']): mycursor.execute(sqlformula6, [a, b, c, d, e,", "c, d, e, f, g, h]) sqlformula3 = \"INSERT INTO", "b, c, d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'],", "[a, b, c, d, e, f, g, h]) ''' #mydb.commit()", "c, d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'],", "c, d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'],", "h]) sqlformula2 = \"INSERT INTO brunei VALUES(%s, %s, %s, %s,", "c, d, e, f, g, h]) sqlformula10 = \"INSERT INTO", "c, d, e, f, g, h]) sqlformula7 = \"INSERT INTO", "\"localhost\", user = \"root\", passwd = \"\", database = \"\"", "indonesia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "sheet_name='Indonesia') 
japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar') new_zeland=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "china['Net_LB'], china['Account_Balance']): mycursor.execute(sqlformula4, [a, b, c, d, e, f, g,", "thailand['Account_Balance']): mycursor.execute(sqlformula13, [a, b, c, d, e, f, g, h])", "pandas as pd import numpy as np import mysql.connector australia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx',", "lao VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\" for", "= \"INSERT INTO japan VALUES(%s, %s, %s, %s, %s, %s,", "b, c, d, e, f, g, h in zip(lao['Year'], lao['RGDP'],", "indonesia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Indonesia') japan=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Japan') lao=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Lao') malaysia=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Malaysia') myanmar=pd.read_excel(r'\\Users\\jesica\\Desktop\\RCEP_economic_analysis.xlsx', sheet_name='Myanmar')", "sqlformula8 = \"INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s," ]
[ "= subprocess.Popen('gdisk {0}'.format(device).split(), stdin=p_echo.stdout, stdout=subprocess.PIPE) stdout, stderr = p_fdisk.communicate() print(stdout)", "0 device_char = 'z' while i < len(volume_ids): v_id =", "stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate() r_subnet_rules = [] for line", "= l_subnet_rule[-1] r_subnet_rules.append( { 'device': device, 'ip': ip, 'subnet_rule': subnet_rule", "if volume.tags is not None: for tag in volume.tags: if", "= p_partprobe.communicate() # print(stdout) # print(stderr) sleep(3) pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)", "ip, 'subnet_rule': subnet_rule } ) r_default_route = '' for line", "ensure device is attached sleep(3) if not check_ebs(v_id): prepare_ebs(v_id) add_fstab_entries(v_id,", "stderr = p_ip.communicate() r_subnet_rules = [] for line in stdout.", "= p_ip.communicate() r_subnet_rules = [] for line in stdout. decode().splitlines():", ".*', line) if res is not None: r_default_route = res.group(0)", "'0', '0' ] with open('/etc/fstab', 'a') as f: f.write('{0} {1}\\n'.format(partition,", "{1}\\n'.format(rule['ip'], rule_index)) f.write('ip r add {0} table {1}\\n'.format(default_route, rule_index)) f.write('ip", "if not check_ebs(v_id): prepare_ebs(v_id) add_fstab_entries(v_id, MOUNT_POINT) p_mount = subprocess.Popen('mount -a'.split(),", "v_id = volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}'.format(v_id) device = glob.glob(pattern)[0]", "if len(ebss) > 0 else None def attach_ebs(): ec2 =", "device_char = chr(ord(device_char) - 1) i += 1 def check_ebs(volume_id):", "{ 'device': device, 'ip': ip, 'subnet_rule': subnet_rule } ) r_default_route", "boto3.resource('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text eni_ids = retrieve_eni_ids() device_number = len(r_ec2.Instance(i_id).network_interfaces)", "f.close() def wait_device_ready(timeout=3): c = 0 while c < timeout:", "< len(volume_ids): v_id = volume_ids[i] device = 
'/dev/xvd{0}'.format(device_char) ec2.attach_volume(Device=device, InstanceId=i_id,", "in stdout.decode().splitlines(): res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line) if res is", "def retrieve_ebs_ids(): ec2 = boto3.resource('ec2') ebss = [] for volume", "< timeout: sleep(1) p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE) stdout, stderr", "r_subnet_rules: default_route = re.sub('eth.', rule['device'], r_default_route) f.write('ip rule add from", "ebss if len(ebss) > 0 else None def attach_ebs(): ec2", "f.write('ip rule add from {0} table {1}\\n'.format(rule['ip'], rule_index)) f.write('ip r", "# uses: DEPLOY_UUID, TAG_KEY attach_eni_ids() # uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID,", "re import glob import boto3 import requests import subprocess from", "'ip': ip, 'subnet_rule': subnet_rule } ) r_default_route = '' for", "return enis if len(enis) > 0 else None def attach_eni_ids():", "= re.sub('eth.', rule['device'], r_default_route) f.write('ip rule add from {0} table", "SERVICE_NAME = os.environ['SERVICE_NAME'] MOUNT_POINT = \"/var/lib/\" + SERVICE_NAME NIC_IP =", "fstab_entries = [ mount_point, 'xfs', 'defaults', '0', '0' ] with", "== TAG_KEY: if tag['Value'] == DEPLOY_UUID: enis.append(eni.network_interface_id) return enis if", "is not None: r_default_route = res.group(0) break with open('/etc/rc.local', 'a')", "l_subnet_rule[-1] r_subnet_rules.append( { 'device': device, 'ip': ip, 'subnet_rule': subnet_rule }", "volume_ids[i] device = '/dev/xvd{0}'.format(device_char) ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id) # Wait to", "'a') as f: f.write('#!/bin/bash\\n\\n') rule_index = 128 default_route_device = ''", "default_route_device, r_default_route) f.write('ip r del default\\n') f.write('ip r add {0}\\n\\n'.format(default_route))", "boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID, TAG_KEY attach_eni_ids() # uses: MOUNT_POINT, SERVICE_NAME,", "requests import subprocess from time import 
sleep AWS_REGION = os.environ['AWS_REGION']", "= 128 default_route_device = '' for rule in r_subnet_rules: default_route", "rule_index)) f.write('ip r add {0} table {1}\\n'.format(default_route, rule_index)) f.write('ip r", "check_ebs(v_id): prepare_ebs(v_id) add_fstab_entries(v_id, MOUNT_POINT) p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE) stdout,", "not None: for tag in volume.tags: if tag['Key'] == TAG_KEY:", "add from {0} table {1}\\n'.format(rule['ip'], rule_index)) f.write('ip r add {0}", "ec2.volumes.all(): if volume.tags is not None: for tag in volume.tags:", "{1}\\n\\n'.format(rule['subnet_rule'], rule_index)) if rule['ip'] == NIC_IP: default_route_device = rule['device'] rule_index", "# p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE) # stdout, stderr =", "){2}eth[0-9](?! $).*', line) if res is not None: subnet_rule =", "= volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) partition = glob.glob(pattern)[0] fstab_entries", "p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE) # stdout, stderr = p_partprobe.communicate()", "= volume_ids[i] device = '/dev/xvd{0}'.format(device_char) ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id) # Wait", "{0}/[0-9]{{2}}'.format(NIC_IP), line) if res is not None: return None c", "== DEPLOY_UUID: enis.append(eni.network_interface_id) return enis if len(enis) > 0 else", "= glob.glob(pattern)[0] p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE) stdout, stderr =", "import os import re import glob import boto3 import requests", "MOUNT_POINT).split(), stdout=subprocess.PIPE) stdout, stderr = p_chown.communicate() device_char = chr(ord(device_char) -", "if __name__ == '__main__': boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID, TAG_KEY attach_eni_ids()", "import subprocess from time import sleep AWS_REGION = os.environ['AWS_REGION'] 
DEPLOY_UUID", "p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE) stdout, stderr = p_mount.communicate() p_chown", "= boto3.client('ec2') r_ec2 = boto3.resource('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text eni_ids =", "stderr = p_ip.communicate() for line in stdout.decode().splitlines(): res = re.match('.*inet", "128 default_route_device = '' for rule in r_subnet_rules: default_route =", "python3 import os import re import glob import boto3 import", "= glob.glob(pattern)[0] gdisk_commands = '\\n'.join([ 'n', '1', '34', '', '',", "'', 'w', 'Y', '' ]) p_echo = subprocess.Popen('echo -ne {0}'.format(gdisk_commands).split('", "raise Exception('Device with address {0} not ready'.format(NIC_IP)) def change_default_route(): wait_device_ready(10)", "p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(), stdout=subprocess.PIPE) stdout, stderr", "ec2 = boto3.client('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text volume_ids = retrieve_ebs_ids() i", "= rule['device'] rule_index += 1 default_route = re.sub('eth.', default_route_device, r_default_route)", "in eni_ids: c_ec2.attach_network_interface(DeviceIndex=device_number, InstanceId=i_id, NetworkInterfaceId=eni_id) def retrieve_ebs_ids(): ec2 = boto3.resource('ec2')", "glob.glob(pattern)[0] gdisk_commands = '\\n'.join([ 'n', '1', '34', '', '', 'w',", "'a') as f: f.write('{0} {1}\\n'.format(partition, ' '.join(fstab_entries))) f.flush() f.close() def", "prepare_ebs(v_id) add_fstab_entries(v_id, MOUNT_POINT) p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE) stdout, stderr", "'/dev/disk/by-id/*{0}'.format(v_id) device = glob.glob(pattern)[0] gdisk_commands = '\\n'.join([ 'n', '1', '34',", "[] for line in stdout. 
decode().splitlines(): res = re.match('(.* ){2}eth[0-9](?!", "{1}\\n'.format(partition, ' '.join(fstab_entries))) f.flush() f.close() def wait_device_ready(timeout=3): c = 0", "retrieve_eni_ids() device_number = len(r_ec2.Instance(i_id).network_interfaces) + 1 for eni_id in eni_ids:", "stdout, stderr = p_mount.communicate() p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME,", "with open('/etc/rc.local', 'a') as f: f.write('#!/bin/bash\\n\\n') rule_index = 128 default_route_device", "pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) partition = glob.glob(pattern)[0] fstab_entries = [ mount_point,", "rule['ip'] == NIC_IP: default_route_device = rule['device'] rule_index += 1 default_route", "boto3.client('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text volume_ids = retrieve_ebs_ids() i = 0", "add {0} table {1}\\n'.format(default_route, rule_index)) f.write('ip r add {0} table", "'vol') pattern = '/dev/disk/by-id/*{0}'.format(v_id) device = glob.glob(pattern)[0] gdisk_commands = '\\n'.join([", "return ebss if len(ebss) > 0 else None def attach_ebs():", "== '__main__': boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID, TAG_KEY attach_eni_ids() # uses:", "def check_ebs(volume_id): v_id = volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) return", "'__main__': boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID, TAG_KEY attach_eni_ids() # uses: MOUNT_POINT,", "not None: subnet_rule = res.group(0) l_subnet_rule = subnet_rule.split() device =", "default_route_device = rule['device'] rule_index += 1 default_route = re.sub('eth.', default_route_device,", "v_id = volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) partition = glob.glob(pattern)[0]", "add {0}\\n\\n'.format(default_route)) f.write('exit 0\\n') f.flush() f.close() os.chmod('/etc/rc.local', 0o0755) p_rc_local =", "None: for tag in volume.tags: if 
tag['Key'] == TAG_KEY: if", "== TAG_KEY: if tag['Value'] == DEPLOY_UUID: ebss.append(volume.volume_id) return ebss if", "+ SERVICE_NAME NIC_IP = os.environ['NIC_IP'] TAG_KEY = os.environ['TAG_KEY'] def retrieve_eni_ids():", "import boto3 import requests import subprocess from time import sleep", "subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(), stdout=subprocess.PIPE) stdout, stderr = p_chown.communicate()", "p_rc_local.communicate() if __name__ == '__main__': boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID, TAG_KEY", "'' for line in stdout.decode().splitlines(): res = re.match('default .*', line)", "retrieve_eni_ids(): ec2 = boto3.resource('ec2') enis = [] for eni in", "attach_eni_ids() # uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY attach_ebs() # uses:", "sleep AWS_REGION = os.environ['AWS_REGION'] DEPLOY_UUID = os.environ['DEPLOY_UUID'] SERVICE_NAME = os.environ['SERVICE_NAME']", "len(ebss) > 0 else None def attach_ebs(): ec2 = boto3.client('ec2')", "' '.join(fstab_entries))) f.flush() f.close() def wait_device_ready(timeout=3): c = 0 while", "stderr = p_fdisk.communicate() print(stdout) print(stderr) # p_partprobe = subprocess.Popen('partprobe'.split(' '),", "p_ip = subprocess.Popen('ip r'.split(), stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate() r_subnet_rules", "re.match('(.* ){2}eth[0-9](?! 
$).*', line) if res is not None: subnet_rule", "is not None: subnet_rule = res.group(0) l_subnet_rule = subnet_rule.split() device", "MOUNT_POINT = \"/var/lib/\" + SERVICE_NAME NIC_IP = os.environ['NIC_IP'] TAG_KEY =", "= volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) return bool(len(glob.glob(pattern))) def prepare_ebs(volume_id):", "= p_ip.communicate() for line in stdout.decode().splitlines(): res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP),", "= os.environ['DEPLOY_UUID'] SERVICE_NAME = os.environ['SERVICE_NAME'] MOUNT_POINT = \"/var/lib/\" + SERVICE_NAME", "os.environ['TAG_KEY'] def retrieve_eni_ids(): ec2 = boto3.resource('ec2') enis = [] for", "device_number = len(r_ec2.Instance(i_id).network_interfaces) + 1 for eni_id in eni_ids: c_ec2.attach_network_interface(DeviceIndex=device_number,", "boto3.client('ec2') r_ec2 = boto3.resource('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text eni_ids = retrieve_eni_ids()", "print(stderr) def add_fstab_entries(volume_id, mount_point): v_id = volume_id.replace('vol-', 'vol') pattern =", "def attach_eni_ids(): c_ec2 = boto3.client('ec2') r_ec2 = boto3.resource('ec2') i_id =", "= p_xfs.communicate() print(stdout) print(stderr) def add_fstab_entries(volume_id, mount_point): v_id = volume_id.replace('vol-',", "line in stdout.decode().splitlines(): res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line) if res", "with open('/etc/fstab', 'a') as f: f.write('{0} {1}\\n'.format(partition, ' '.join(fstab_entries))) f.flush()", "rule_index = 128 default_route_device = '' for rule in r_subnet_rules:", "chr(ord(device_char) - 1) i += 1 def check_ebs(volume_id): v_id =", "re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line) if res is not None: return None", "f.write('ip r del default\\n') f.write('ip r add {0}\\n\\n'.format(default_route)) f.write('exit 0\\n')", "is not None: return None c += 1 raise Exception('Device", "device_char = 'z' while i < 
def retrieve_eni_ids():
    """Return the ids of all ENIs tagged TAG_KEY=DEPLOY_UUID, or None if none match."""
    ec2 = boto3.resource('ec2')
    matched = [
        eni.network_interface_id
        for eni in ec2.network_interfaces.all()
        for tag in eni.tag_set
        if tag['Key'] == TAG_KEY and tag['Value'] == DEPLOY_UUID
    ]
    return matched if matched else None
def attach_eni_ids():
    """Attach every tagged ENI to this instance.

    Fixes over the original:
    - retrieve_eni_ids() returns None when nothing matches; treat that as
      "nothing to do" instead of raising TypeError on iteration.
    - each ENI gets its own DeviceIndex (attaching two interfaces at the
      same index is rejected by EC2).
    - metadata request carries a timeout so a hung metadata service cannot
      block the bootstrap forever.
    """
    c_ec2 = boto3.client('ec2')
    r_ec2 = boto3.resource('ec2')
    i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=10).text
    eni_ids = retrieve_eni_ids() or []
    # First free index: one past the interfaces already present.
    base_index = len(r_ec2.Instance(i_id).network_interfaces) + 1
    for offset, eni_id in enumerate(eni_ids):
        c_ec2.attach_network_interface(DeviceIndex=base_index + offset,
                                       InstanceId=i_id,
                                       NetworkInterfaceId=eni_id)
def retrieve_ebs_ids():
    """Return the ids of all EBS volumes tagged TAG_KEY=DEPLOY_UUID, or None if none match."""
    ec2 = boto3.resource('ec2')
    matched = [
        volume.volume_id
        for volume in ec2.volumes.all()
        if volume.tags is not None
        for tag in volume.tags
        if tag['Key'] == TAG_KEY and tag['Value'] == DEPLOY_UUID
    ]
    return matched if matched else None
def attach_ebs():
    """Attach, prepare, mount and chown every tagged EBS volume.

    Devices are named /dev/xvdz, /dev/xvdy, ... counting down, one per volume.

    Fixes over the original:
    - retrieve_ebs_ids() returns None when nothing matches; treat that as
      "nothing to do" instead of raising TypeError on len(None).
    - manual while/index loop replaced by a for loop over the volume ids.
    - metadata request carries a timeout.
    """
    ec2 = boto3.client('ec2')
    i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=10).text
    volume_ids = retrieve_ebs_ids() or []
    device_char = 'z'
    for v_id in volume_ids:
        device = '/dev/xvd{0}'.format(device_char)
        ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id)
        # Wait to ensure device is attached
        sleep(3)
        if not check_ebs(v_id):
            prepare_ebs(v_id)
        add_fstab_entries(v_id, MOUNT_POINT)
        # Mount everything in fstab, then hand ownership to the service user.
        for cmd in ('mount -a',
                    'chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT)):
            proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
            proc.communicate()
        device_char = chr(ord(device_char) - 1)
def check_ebs(volume_id):
    """Return True when the volume already has a first partition visible under /dev/disk/by-id."""
    # udev exposes the volume as .../by-id/*vol<id>; the '-part1' suffix
    # only appears once the disk has been partitioned.
    v_id = volume_id.replace('vol-', 'vol')
    matches = glob.glob('/dev/disk/by-id/*{0}-part1'.format(v_id))
    return len(matches) > 0
def prepare_ebs(volume_id):
    """Partition a freshly attached EBS volume with gdisk and format it as XFS.

    Fixes over the original:
    - the gdisk answers are fed straight to gdisk's stdin via communicate()
      instead of piping through an `echo -ne` subprocess whose command line
      was built by splitting a newline-containing string on spaces (fragile,
      and it leaked the echo process's stdout pipe).
    - dead commented-out partprobe code removed.
    """
    v_id = volume_id.replace('vol-', 'vol')
    device = glob.glob('/dev/disk/by-id/*{0}'.format(v_id))[0]
    # gdisk interactive answers: new partition, number 1, first sector 34,
    # default last sector, default type, write table, confirm.
    gdisk_commands = '\n'.join([
        'n',
        '1',
        '34',
        '',
        '',
        'w',
        'Y',
        ''
    ])
    p_gdisk = subprocess.Popen('gdisk {0}'.format(device).split(),
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    stdout, stderr = p_gdisk.communicate(input=gdisk_commands.encode())
    print(stdout)
    print(stderr)
    # Give udev time to create the new -part1 symlink.
    sleep(3)
    partition = glob.glob('/dev/disk/by-id/*{0}-part1'.format(v_id))[0]
    p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(),
                             stdout=subprocess.PIPE)
    stdout, stderr = p_xfs.communicate()
    print(stdout)
    print(stderr)
def add_fstab_entries(volume_id, mount_point):
    """Append an /etc/fstab entry mounting the volume's first partition at mount_point.

    Fix over the original: the explicit f.flush()/f.close() inside the
    `with` block were redundant — the context manager already flushes and
    closes on exit.
    """
    v_id = volume_id.replace('vol-', 'vol')
    partition = glob.glob('/dev/disk/by-id/*{0}-part1'.format(v_id))[0]
    fstab_entries = [
        mount_point,
        'xfs',
        'defaults',
        '0',
        '0'
    ]
    with open('/etc/fstab', 'a') as f:
        f.write('{0} {1}\n'.format(partition, ' '.join(fstab_entries)))
def wait_device_ready(timeout=3):
    """Poll `ip a` until NIC_IP shows up on some interface.

    Args:
        timeout: number of 1-second polls before giving up.

    Raises:
        Exception: if the address never appears within the timeout.

    Fixes over the original:
    - NIC_IP is re.escape()d before interpolation into the pattern; the raw
      dots in an IP address matched *any* character and could false-match.
    - prefix length accepts one or two digits ([0-9]{1,2}), so e.g. /8
      networks are recognized too.
    - the pattern is built once, outside the polling loop.
    """
    pattern = '.*inet {0}/[0-9]{{1,2}}'.format(re.escape(NIC_IP))
    for _ in range(timeout):
        sleep(1)
        p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE)
        stdout, _ = p_ip.communicate()
        for line in stdout.decode().splitlines():
            if re.match(pattern, line) is not None:
                return None
    raise Exception('Device with address {0} not ready'.format(NIC_IP))
def change_default_route():
    """Generate /etc/rc.local with policy-routing rules and switch the default route.

    Parses `ip r` output, writes one routing table (starting at index 128)
    per detected ethN subnet rule, then replaces the default route with one
    going out of the device that carries NIC_IP. Runs the generated script
    immediately so the change applies now as well as on boot.
    """
    # Block until the ENI carrying NIC_IP is actually up (up to 10 polls).
    wait_device_ready(10)
    p_ip = subprocess.Popen('ip r'.split(), stdout=subprocess.PIPE)
    stdout, stderr = p_ip.communicate()
    # Collect every routing-table line that mentions an ethN device
    # (the negative lookahead skips lines where ethN is the final token).
    r_subnet_rules = []
    for line in stdout.decode().splitlines():
        res = re.match('(.* ){2}eth[0-9](?! $).*', line)
        if res is not None:
            subnet_rule = res.group(0)
            l_subnet_rule = subnet_rule.split()
            # assumes `ip r` layout "<subnet> dev <device> ... src <ip>":
            # token 2 is the device, last token the source IP — TODO confirm
            device = l_subnet_rule[2]
            ip = l_subnet_rule[-1]
            r_subnet_rules.append(
                {
                    'device': device,
                    'ip': ip,
                    'subnet_rule': subnet_rule
                }
            )
    # Grab the current default route line (first match wins).
    r_default_route = ''
    for line in stdout.decode().splitlines():
        res = re.match('default .*', line)
        if res is not None:
            r_default_route = res.group(0)
            break
    # Emit the boot-time script. NOTE(review): opened in append mode, so a
    # re-run appends a second copy rather than replacing the first.
    with open('/etc/rc.local', 'a') as f:
        f.write('#!/bin/bash\n\n')
        rule_index = 128
        default_route_device = ''
        for rule in r_subnet_rules:
            # NOTE(review): 'eth.' is a regex — the dot matches any single
            # character after "eth", not a literal dot.
            default_route = re.sub('eth.', rule['device'], r_default_route)
            f.write('ip rule add from {0} table {1}\n'.format(rule['ip'], rule_index))
            f.write('ip r add {0} table {1}\n'.format(default_route, rule_index))
            f.write('ip r add {0} table {1}\n\n'.format(rule['subnet_rule'], rule_index))
            # Remember which device carries NIC_IP; it becomes the new default.
            if rule['ip'] == NIC_IP:
                default_route_device = rule['device']
            rule_index += 1
        default_route = re.sub('eth.', default_route_device, r_default_route)
        f.write('ip r del default\n')
        f.write('ip r add {0}\n\n'.format(default_route))
        f.write('exit 0\n')
        f.flush()
        f.close()
    # Make it executable and apply the routing changes right away.
    os.chmod('/etc/rc.local', 0o0755)
    p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE)
    stdout, stderr = p_rc_local.communicate()
f.close() os.chmod('/etc/rc.local', 0o0755)", "{1}\\n'.format(default_route, rule_index)) f.write('ip r add {0} table {1}\\n\\n'.format(rule['subnet_rule'], rule_index)) if", "for rule in r_subnet_rules: default_route = re.sub('eth.', rule['device'], r_default_route) f.write('ip", "for eni in ec2.network_interfaces.all(): for tag in eni.tag_set: if tag['Key']", "stderr = p_xfs.communicate() print(stdout) print(stderr) def add_fstab_entries(volume_id, mount_point): v_id =", "while i < len(volume_ids): v_id = volume_ids[i] device = '/dev/xvd{0}'.format(device_char)", "rule in r_subnet_rules: default_route = re.sub('eth.', rule['device'], r_default_route) f.write('ip rule", "+= 1 def check_ebs(volume_id): v_id = volume_id.replace('vol-', 'vol') pattern =", "= subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE) stdout, stderr = p_rc_local.communicate() if __name__ ==", ") r_default_route = '' for line in stdout.decode().splitlines(): res =", "= [] for volume in ec2.volumes.all(): if volume.tags is not", "{0} table {1}\\n'.format(rule['ip'], rule_index)) f.write('ip r add {0} table {1}\\n'.format(default_route,", "attached sleep(3) if not check_ebs(v_id): prepare_ebs(v_id) add_fstab_entries(v_id, MOUNT_POINT) p_mount =", "print(stderr) # p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE) # stdout, stderr", "p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate() for", "= p_fdisk.communicate() print(stdout) print(stderr) # p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE)", "{1}'.format(SERVICE_NAME, MOUNT_POINT).split(), stdout=subprocess.PIPE) stdout, stderr = p_chown.communicate() device_char = chr(ord(device_char)", "= '/dev/xvd{0}'.format(device_char) ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id) # Wait to ensure device", "res.group(0) break with open('/etc/rc.local', 'a') as f: f.write('#!/bin/bash\\n\\n') rule_index =", "device = 
glob.glob(pattern)[0] gdisk_commands = '\\n'.join([ 'n', '1', '34', '',", "eni_id in eni_ids: c_ec2.attach_network_interface(DeviceIndex=device_number, InstanceId=i_id, NetworkInterfaceId=eni_id) def retrieve_ebs_ids(): ec2 =", "Exception('Device with address {0} not ready'.format(NIC_IP)) def change_default_route(): wait_device_ready(10) p_ip", "ip = l_subnet_rule[-1] r_subnet_rules.append( { 'device': device, 'ip': ip, 'subnet_rule':", "c = 0 while c < timeout: sleep(1) p_ip =", "p_ip.communicate() r_subnet_rules = [] for line in stdout. decode().splitlines(): res", "v_id = volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) return bool(len(glob.glob(pattern))) def", "subnet_rule = res.group(0) l_subnet_rule = subnet_rule.split() device = l_subnet_rule[2] ip", "[ mount_point, 'xfs', 'defaults', '0', '0' ] with open('/etc/fstab', 'a')", "not None: r_default_route = res.group(0) break with open('/etc/rc.local', 'a') as", "= subnet_rule.split() device = l_subnet_rule[2] ip = l_subnet_rule[-1] r_subnet_rules.append( {", "os.environ['AWS_REGION'] DEPLOY_UUID = os.environ['DEPLOY_UUID'] SERVICE_NAME = os.environ['SERVICE_NAME'] MOUNT_POINT = \"/var/lib/\"", "partition = glob.glob(pattern)[0] fstab_entries = [ mount_point, 'xfs', 'defaults', '0',", "add_fstab_entries(v_id, MOUNT_POINT) p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE) stdout, stderr =", "p_mount.communicate() p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(), stdout=subprocess.PIPE) stdout,", "None def attach_eni_ids(): c_ec2 = boto3.client('ec2') r_ec2 = boto3.resource('ec2') i_id", "eni.tag_set: if tag['Key'] == TAG_KEY: if tag['Value'] == DEPLOY_UUID: enis.append(eni.network_interface_id)", "= boto3.client('ec2') i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text volume_ids = retrieve_ebs_ids() i =", "tag in eni.tag_set: if tag['Key'] == TAG_KEY: if tag['Value'] ==", 
"subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE) stdout, stderr = p_xfs.communicate() print(stdout) print(stderr) def", "stdout, stderr = p_partprobe.communicate() # print(stdout) # print(stderr) sleep(3) pattern", "res = re.match('default .*', line) if res is not None:", "= boto3.resource('ec2') ebss = [] for volume in ec2.volumes.all(): if", "len(volume_ids): v_id = volume_ids[i] device = '/dev/xvd{0}'.format(device_char) ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id)", "def wait_device_ready(timeout=3): c = 0 while c < timeout: sleep(1)", "f: f.write('#!/bin/bash\\n\\n') rule_index = 128 default_route_device = '' for rule", "os.environ['SERVICE_NAME'] MOUNT_POINT = \"/var/lib/\" + SERVICE_NAME NIC_IP = os.environ['NIC_IP'] TAG_KEY", "f.close() os.chmod('/etc/rc.local', 0o0755) p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE) stdout, stderr =", "\"/var/lib/\" + SERVICE_NAME NIC_IP = os.environ['NIC_IP'] TAG_KEY = os.environ['TAG_KEY'] def", "return None c += 1 raise Exception('Device with address {0}", "{0} not ready'.format(NIC_IP)) def change_default_route(): wait_device_ready(10) p_ip = subprocess.Popen('ip r'.split(),", "= p_rc_local.communicate() if __name__ == '__main__': boto3.setup_default_session(region_name=AWS_REGION) # uses: DEPLOY_UUID,", "open('/etc/rc.local', 'a') as f: f.write('#!/bin/bash\\n\\n') rule_index = 128 default_route_device =", "res is not None: return None c += 1 raise", "stdout.decode().splitlines(): res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line) if res is not", "if tag['Key'] == TAG_KEY: if tag['Value'] == DEPLOY_UUID: enis.append(eni.network_interface_id) return", "if res is not None: return None c += 1", "tag['Value'] == DEPLOY_UUID: ebss.append(volume.volume_id) return ebss if len(ebss) > 0", "eni in ec2.network_interfaces.all(): for tag in eni.tag_set: if tag['Key'] ==", "open('/etc/fstab', 'a') as f: f.write('{0} 
{1}\\n'.format(partition, ' '.join(fstab_entries))) f.flush() f.close()", "subprocess.Popen('gdisk {0}'.format(device).split(), stdin=p_echo.stdout, stdout=subprocess.PIPE) stdout, stderr = p_fdisk.communicate() print(stdout) print(stderr)", "= os.environ['NIC_IP'] TAG_KEY = os.environ['TAG_KEY'] def retrieve_eni_ids(): ec2 = boto3.resource('ec2')", "decode().splitlines(): res = re.match('(.* ){2}eth[0-9](?! $).*', line) if res is", "stdout, stderr = p_fdisk.communicate() print(stdout) print(stderr) # p_partprobe = subprocess.Popen('partprobe'.split('", "1) i += 1 def check_ebs(volume_id): v_id = volume_id.replace('vol-', 'vol')", "stderr = p_partprobe.communicate() # print(stdout) # print(stderr) sleep(3) pattern =", "if tag['Value'] == DEPLOY_UUID: ebss.append(volume.volume_id) return ebss if len(ebss) >", "]) p_echo = subprocess.Popen('echo -ne {0}'.format(gdisk_commands).split(' '), stdout=subprocess.PIPE) p_fdisk =", "retrieve_ebs_ids() i = 0 device_char = 'z' while i <", "subprocess from time import sleep AWS_REGION = os.environ['AWS_REGION'] DEPLOY_UUID =", "0 else None def attach_ebs(): ec2 = boto3.client('ec2') i_id =", "# print(stdout) # print(stderr) sleep(3) pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id) partition =", "ebss = [] for volume in ec2.volumes.all(): if volume.tags is", "len(enis) > 0 else None def attach_eni_ids(): c_ec2 = boto3.client('ec2')", "1 def check_ebs(volume_id): v_id = volume_id.replace('vol-', 'vol') pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)", "is attached sleep(3) if not check_ebs(v_id): prepare_ebs(v_id) add_fstab_entries(v_id, MOUNT_POINT) p_mount", "res = re.match('(.* ){2}eth[0-9](?! 
$).*', line) if res is not", "'device': device, 'ip': ip, 'subnet_rule': subnet_rule } ) r_default_route =", "= '\\n'.join([ 'n', '1', '34', '', '', 'w', 'Y', ''", "l_subnet_rule = subnet_rule.split() device = l_subnet_rule[2] ip = l_subnet_rule[-1] r_subnet_rules.append(", "# uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY attach_ebs() # uses: NIC_IP", "stdout, stderr = p_ip.communicate() for line in stdout.decode().splitlines(): res =", "stdout. decode().splitlines(): res = re.match('(.* ){2}eth[0-9](?! $).*', line) if res", "boto3.resource('ec2') enis = [] for eni in ec2.network_interfaces.all(): for tag", "> 0 else None def attach_ebs(): ec2 = boto3.client('ec2') i_id", "if tag['Value'] == DEPLOY_UUID: enis.append(eni.network_interface_id) return enis if len(enis) >", "for line in stdout. decode().splitlines(): res = re.match('(.* ){2}eth[0-9](?! $).*',", "'/dev/disk/by-id/*{0}-part1'.format(v_id) partition = glob.glob(pattern)[0] p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE) stdout,", "p_fdisk.communicate() print(stdout) print(stderr) # p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE) #", "not None: return None c += 1 raise Exception('Device with", "f.flush() f.close() def wait_device_ready(timeout=3): c = 0 while c <", "stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate() for line in stdout.decode().splitlines(): res", "sleep(1) p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate()", "f.write('exit 0\\n') f.flush() f.close() os.chmod('/etc/rc.local', 0o0755) p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE)", "subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE) stdout, stderr = p_ip.communicate() for line in" ]
[ "\"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\",", "args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL,", "software_hive=software_hive) self._run_command(command) for csv_file in os.listdir(output): srum_records = [] full_path", "[], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\",", "self._run_command(command) for csv_file in os.listdir(output): srum_records = [] full_path =", "PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o {output_path} -p srudb_plugin", "= csv.DictReader(f) for line in reader: cur_record = {} for", "os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file", "output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file in os.listdir(output): srum_records =", "[\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] }", "datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time,", "config): super().__init__(config) self.temp_result_path = temp def parse(self, args): srum_db, software_hive", "= duration else: cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] =", "\"_\")] = value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] = int(value)", "continue if csv_file == \"Unknown1.csv\": with open(full_path, \"r\") as f:", "= value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] = int(value) else:", "{srum_db} 
--plugin-args {software_hive}\" def __init__(self, temp, config): super().__init__(config) self.temp_result_path =", "datetime import random import os from parsers.parser_base import ParserBase FILE_TIME_EPOCH", "= datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\")", "in reader: cur_record = {} for header in headers: if", "__init__(self, temp, config): super().__init__(config) self.temp_result_path = temp def parse(self, args):", "\"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\",", "1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for", "= temp def parse(self, args): srum_db, software_hive = args output", "PARSE_COMMAND = \"{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}\"", "r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive)", "[], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"],", "import random import os from parsers.parser_base import ParserBase FILE_TIME_EPOCH =", "\"{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}\" def __init__(self,", "duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"]", "line.pop(\"TimeStamp\") value = line.get(header) if value: if isinstance(value, bytes): cur_record[header.lower().replace(\"", "def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): 
microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND", "1) FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): microseconds_since_file_time_epoch", "{ \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\":", "-o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}\" def __init__(self, temp,", "cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\") line.pop(\"TimeStamp\") value = line.get(header) if", "[] full_path = os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file) if not", "\"_\")] = value else: cur_record[header.lower().replace(\" \", \"_\")] = \"\" srum_records.append(cur_record)", "--plugin-args {software_hive}\" def __init__(self, temp, config): super().__init__(config) self.temp_result_path = temp", "1, 1) FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int):", "open(full_path, \"r\") as f: reader = csv.DictReader(f) for line in", "cur_record[header.lower().replace(\" \", \"_\")] = value else: cur_record[header.lower().replace(\" \", \"_\")] =", "= [] full_path = os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file) if", "os.listdir(output): srum_records = [] full_path = os.path.join(output, csv_file) headers =", "- int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"]", "= r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db,", "header in headers: if header == \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\"", "in headers: if header == \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \",", "value: if 
isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")] = value.decode() elif", "csv import datetime import random import os from parsers.parser_base import", "temp def parse(self, args): srum_db, software_hive = args output =", "reader = csv.DictReader(f) for line in reader: cur_record = {}", "cur_record = {} for header in headers: if header ==", "[], \"Energy Usage.csv\": [], \"Energy Usage(Long - Term).csv\": [], \"Application", "[], \"Energy Usage(Long - Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\",", "random import os from parsers.parser_base import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601,", "\"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network", "10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): microseconds_since_file_time_epoch = file_time /", "import csv import datetime import random import os from parsers.parser_base", "\"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\",", "\"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\",", "Usage(Long - Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application", "cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path, \"r\") as f:", "\"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\": [],", "line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path, \"r\") as f: reader =", "headers: continue if csv_file == \"Unknown1.csv\": with open(full_path, \"r\") as", "bytes): cur_record[header.lower().replace(\" \", \"_\")] = value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \",", 
"\", \"_\")] = int(value) else: cur_record[header.lower().replace(\" \", \"_\")] = value", "== \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\") line.pop(\"TimeStamp\") value =", "cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"] = datetime.datetime(1970,", "1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else: with", "microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time) /", "= line.get(header) if value: if isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")]", "from parsers.parser_base import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND", "= filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"] = datetime.datetime(1970, 1,", "FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time):", "def parse(self, args): srum_db, software_hive = args output = r\"{}\\srum_{}\".format(self.temp_result_path,", "\"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\",", "csv_file in os.listdir(output): srum_records = [] full_path = os.path.join(output, csv_file)", "line in reader: cur_record = {} for header in headers:", "command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file in", "filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND else:", "\"BytesRecvd\"], \"Network Connections.csv\": [], \"Energy Usage.csv\": [], \"Energy Usage(Long 
-", "cur_record[header.lower().replace(\" \", \"_\")] = \"\" srum_records.append(cur_record) self._write_results_list([(\"srum-{}\".format(csv_file.split(\".\")[0].lower().replace(\" \", \"_\")), srum_records)])", "return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\":", "\"_\")] = int(value) else: cur_record[header.lower().replace(\" \", \"_\")] = value else:", "def __init__(self, temp, config): super().__init__(config) self.temp_result_path = temp def parse(self,", "duration = line.get(\"DurationMS\") if endTime and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime)", "1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else:", "headers = self.CSV_FIELDS.get(csv_file) if not headers: continue if csv_file ==", "\"Network Connections.csv\": [], \"Energy Usage.csv\": [], \"Energy Usage(Long - Term).csv\":", "str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] = int(value) else: cur_record[header.lower().replace(\" \", \"_\")]", "Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND =", "{software_hive}\" def __init__(self, temp, config): super().__init__(config) self.temp_result_path = temp def", "with open(full_path, \"r\") as f: reader = csv.DictReader(f) for line", "= csv.DictReader(f) for line in reader: cur_record = {} endTime", "duration else: cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\")", "line.get(\"DurationMS\") if endTime and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat()", "line.get(\"EndTime\") duration = line.get(\"DurationMS\") if endTime and duration: cur_record[\"time\"] =", "= line.get(\"TimeStamp\").replace(\" \", \"T\") 
line.pop(\"TimeStamp\") value = line.get(header) if value:", "\"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\"", "/ FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS =", "random.randint(1, 1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command)", "else: microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch)", "value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] = int(value) else: cur_record[header.lower().replace(\"", "\"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\",", "} PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o {output_path} -p", "for csv_file in os.listdir(output): srum_records = [] full_path = os.path.join(output,", "= { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [],", "self.CSV_FIELDS.get(csv_file) if not headers: continue if csv_file == \"Unknown1.csv\": with", "isinstance(file_time, int): microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch =", "filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat()", "reader: cur_record = {} endTime = line.get(\"EndTime\") duration = line.get(\"DurationMS\")", "[], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\",", 
"\"Unknown1.csv\": with open(full_path, \"r\") as f: reader = csv.DictReader(f) for", "cur_record[header.lower().replace(\" \", \"_\")] = value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")]", "if not headers: continue if csv_file == \"Unknown1.csv\": with open(full_path,", "\"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\": [], \"Energy Usage.csv\": [], \"Energy Usage(Long", "\"AppId\", \"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o", "\"T\") line.pop(\"TimeStamp\") value = line.get(header) if value: if isinstance(value, bytes):", "class SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\",", "[\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path}", "software_hive = args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command", "else: cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"]", "Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND", "cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] =", "csv.DictReader(f) for line in reader: cur_record = {} endTime =", "= line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path, \"r\")", "reader: cur_record = {} for header in headers: if header", "\"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\") line.pop(\"TimeStamp\") value = line.get(header)", "else: cur_record[header.lower().replace(\" \", \"_\")] = \"\" srum_records.append(cur_record) 
self._write_results_list([(\"srum-{}\".format(csv_file.split(\".\")[0].lower().replace(\" \", \"_\")),", "in reader: cur_record = {} endTime = line.get(\"EndTime\") duration =", "self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file in os.listdir(output): srum_records", "for header in headers: if header == \"TimeStamp\": cur_record[\"time\"] =", "[], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network", "import os from parsers.parser_base import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1,", "srum_db, software_hive = args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output)", "ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND = 10 def", "\"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [],", "[\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\",", "= line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path, \"r\") as f: reader", "file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND return", "FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH +", "\"Energy Usage(Long - Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"],", "-p srudb_plugin {srum_db} --plugin-args {software_hive}\" def __init__(self, temp, config): super().__init__(config)", "\"Energy Usage.csv\": [], \"Energy Usage(Long - Term).csv\": [], \"Application Resources.csv\":", "\"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network 
Connections.csv\": [], \"Energy Usage.csv\":", "import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND = 10", "value = line.get(header) if value: if isinstance(value, bytes): cur_record[header.lower().replace(\" \",", "= r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o {output_path} -p srudb_plugin {srum_db}", "= self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file in os.listdir(output):", "srum_records = [] full_path = os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file)", "line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path, \"r\") as", "\"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [], \"Network Usage.csv\":", "\"AppId\", \"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL", "self.temp_result_path = temp def parse(self, args): srum_db, software_hive = args", "for line in reader: cur_record = {} for header in", "if endTime and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"]", "= int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase):", "[], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\":", "+ datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\",", "= 10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): microseconds_since_file_time_epoch = file_time", "csv_file) headers = 
self.CSV_FIELDS.get(csv_file) if not headers: continue if csv_file", "{} for header in headers: if header == \"TimeStamp\": cur_record[\"time\"]", "FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\",", "SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"],", "srum_db=srum_db, software_hive=software_hive) self._run_command(command) for csv_file in os.listdir(output): srum_records = []", "CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\":", "= filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration", "full_path = os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file) if not headers:", "FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS = {", "srudb_plugin {srum_db} --plugin-args {software_hive}\" def __init__(self, temp, config): super().__init__(config) self.temp_result_path", "parsers.parser_base import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND =", "\"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\":", "= args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command =", "not headers: continue if csv_file == \"Unknown1.csv\": with open(full_path, \"r\")", "== \"Unknown1.csv\": with open(full_path, \"r\") as f: reader = csv.DictReader(f)", "datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") 
srum_records.append(cur_record)", "\", \"T\") line.pop(\"TimeStamp\") value = line.get(header) if value: if isinstance(value,", "Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource Usage.csv\":", "else: cur_record[header.lower().replace(\" \", \"_\")] = value else: cur_record[header.lower().replace(\" \", \"_\")]", "isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")] = value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\"", "datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS = { \"Unknown1.csv\": [\"TimeStamp\", \"AppId\", \"UserId\",", "{output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}\" def __init__(self, temp, config):", "as f: reader = csv.DictReader(f) for line in reader: cur_record", "int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class SrumParser(ParserBase): CSV_FIELDS", "Connections.csv\": [], \"Energy Usage.csv\": [], \"Energy Usage(Long - Term).csv\": [],", "if value: if isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")] = value.decode()", "[\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\": [],", "super().__init__(config) self.temp_result_path = temp def parse(self, args): srum_db, software_hive =", "\"r\") as f: reader = csv.DictReader(f) for line in reader:", "cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"] = datetime.datetime(1970, 1, 1).isoformat() cur_record[\"AppId\"]", "\"SruDbIdMapTable.csv\": [], \"Network Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\",", "parse(self, args): srum_db, software_hive = args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1,", "temp, config): 
super().__init__(config) self.temp_result_path = temp def parse(self, args): srum_db,", "= {} for header in headers: if header == \"TimeStamp\":", "= value else: cur_record[header.lower().replace(\" \", \"_\")] = \"\" srum_records.append(cur_record) self._write_results_list([(\"srum-{}\".format(csv_file.split(\".\")[0].lower().replace(\"", "/ FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH", "os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file) if not headers: continue if", "import datetime import random import os from parsers.parser_base import ParserBase", "\"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\": [], \"Energy Usage.csv\": [],", "output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000)) os.mkdir(output) command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output,", "else: with open(full_path, \"r\") as f: reader = csv.DictReader(f) for", "Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"]", "if csv_file == \"Unknown1.csv\": with open(full_path, \"r\") as f: reader", "cur_record = {} endTime = line.get(\"EndTime\") duration = line.get(\"DurationMS\") if", "os from parsers.parser_base import ParserBase FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)", "endTime and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] =", "= line.get(\"DurationMS\") if endTime and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) -", "f: reader = csv.DictReader(f) for line in reader: cur_record =", "microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch) class", "\"EndTime\", \"DurationMS\"], 
\"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"],", "cur_record[header.lower().replace(\" \", \"_\")] = int(value) else: cur_record[header.lower().replace(\" \", \"_\")] =", "[\"TimeStamp\", \"AppId\", \"UserId\", \"EndTime\", \"DurationMS\"], \"Unknown2.csv\": [], \"Unknown3.csv\": [], \"Unknown4.csv\":", "cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] =", "header == \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\") line.pop(\"TimeStamp\") value", "= file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND", "csv_file == \"Unknown1.csv\": with open(full_path, \"r\") as f: reader =", "args): srum_db, software_hive = args output = r\"{}\\srum_{}\".format(self.temp_result_path, random.randint(1, 1000000))", "= {} endTime = line.get(\"EndTime\") duration = line.get(\"DurationMS\") if endTime", "r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args", "\"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\": [], \"Energy Usage.csv\": [], \"Energy", "Usage.csv\": [], \"Energy Usage(Long - Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\",", "headers: if header == \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\")", "if isinstance(file_time, int): microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch", "line in reader: cur_record = {} endTime = line.get(\"EndTime\") duration", "in os.listdir(output): srum_records = [] full_path = os.path.join(output, csv_file) headers", "if header == \"TimeStamp\": cur_record[\"time\"] = line.get(\"TimeStamp\").replace(\" \", \"T\") 
line.pop(\"TimeStamp\")", "- Term).csv\": [], \"Application Resources.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"Application Resource", "line.get(\"TimeStamp\").replace(\" \", \"T\") line.pop(\"TimeStamp\") value = line.get(header) if value: if", "line.get(header) if value: if isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")] =", "= datetime.datetime(1601, 1, 1) FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time): if", "\"Unknown3.csv\": [], \"Unknown4.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"], \"SruDbCheckpointTable.csv\": [], \"SruDbIdMapTable.csv\": [],", "int): microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND else: microseconds_since_file_time_epoch = int(file_time)", "\"UserId\"] } PARSING_TOOL = r\"Tools\\ese-analyst-master\\ese2csv.exe\" PARSE_COMMAND = \"{parser_path} -o {output_path}", "= \"{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}\" def", "FILE_TIME_MICROSECOND = 10 def filetime_to_epoch_datetime(file_time): if isinstance(file_time, int): microseconds_since_file_time_epoch =", "= self.CSV_FIELDS.get(csv_file) if not headers: continue if csv_file == \"Unknown1.csv\":", "= line.get(\"EndTime\") duration = line.get(\"DurationMS\") if endTime and duration: cur_record[\"time\"]", "and duration: cur_record[\"time\"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat()", "int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else: cur_record[\"time\"] =", "{} endTime = line.get(\"EndTime\") duration = line.get(\"DurationMS\") if endTime and", "if isinstance(value, bytes): cur_record[header.lower().replace(\" \", \"_\")] = value.decode() elif str.isdigit(value):", "csv.DictReader(f) for line in reader: cur_record = {} for header", "srum_records.append(cur_record) else: 
with open(full_path, \"r\") as f: reader = csv.DictReader(f)", "= int(value) else: cur_record[header.lower().replace(\" \", \"_\")] = value else: cur_record[header.lower().replace(\"", "\", \"_\")] = value else: cur_record[header.lower().replace(\" \", \"_\")] = \"\"", "\"UserId\"], \"Application Resource Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\"] } PARSING_TOOL =", "for line in reader: cur_record = {} endTime = line.get(\"EndTime\")", "\"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\": [], \"Energy", "\", \"_\")] = value.decode() elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] =", "int(value) else: cur_record[header.lower().replace(\" \", \"_\")] = value else: cur_record[header.lower().replace(\" \",", "filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat() cur_record[\"EndTime\"] = filetime_to_epoch_datetime(endTime).isoformat() cur_record[\"DurationMS\"] = duration else:", "value else: cur_record[header.lower().replace(\" \", \"_\")] = \"\" srum_records.append(cur_record) self._write_results_list([(\"srum-{}\".format(csv_file.split(\".\")[0].lower().replace(\" \",", "= os.path.join(output, csv_file) headers = self.CSV_FIELDS.get(csv_file) if not headers: continue", "elif str.isdigit(value): cur_record[header.lower().replace(\" \", \"_\")] = int(value) else: cur_record[header.lower().replace(\" \",", "cur_record[\"AppId\"] = line.get(\"AppId\") cur_record[\"UserId\"] = line.get(\"UserId\") srum_records.append(cur_record) else: with open(full_path,", "endTime = line.get(\"EndTime\") duration = line.get(\"DurationMS\") if endTime and duration:", "Usage.csv\": [\"TimeStamp\", \"AppId\", \"UserId\", \"InterfaceLuid\", \"L2ProfileId\", \"BytesSent\", \"BytesRecvd\"], \"Network Connections.csv\":" ]
[ "= '<KEY>' request.META['CSRF_COOKIE'] = test_token token = csrf(request).get('csrf_token') self.assertTrue(equivalent_tokens(str(token), test_token))", "as equivalent_tokens from django.template.context_processors import csrf from django.test import SimpleTestCase", "HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE'] = test_token token = csrf(request).get('csrf_token')", "import _compare_salted_tokens as equivalent_tokens from django.template.context_processors import csrf from django.test", "def test_force_token_to_string(self): request = HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE'] =", "import csrf from django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self):", "_compare_salted_tokens as equivalent_tokens from django.template.context_processors import csrf from django.test import", "HttpRequest from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens from django.template.context_processors import", "import HttpRequest from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens from django.template.context_processors", "from django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request =", "class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request = HttpRequest() test_token = '<KEY>'", "SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request = HttpRequest() test_token =", "django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request = HttpRequest()", "TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request = HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE']", "csrf from django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): 
request", "request = HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE'] = test_token token", "equivalent_tokens from django.template.context_processors import csrf from django.test import SimpleTestCase class", "= HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE'] = test_token token =", "import SimpleTestCase class TestContextProcessor(SimpleTestCase): def test_force_token_to_string(self): request = HttpRequest() test_token", "django.template.context_processors import csrf from django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase): def", "from django.http import HttpRequest from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens", "from django.template.context_processors import csrf from django.test import SimpleTestCase class TestContextProcessor(SimpleTestCase):", "test_token = '<KEY>' request.META['CSRF_COOKIE'] = test_token token = csrf(request).get('csrf_token') self.assertTrue(equivalent_tokens(str(token),", "django.middleware.csrf import _compare_salted_tokens as equivalent_tokens from django.template.context_processors import csrf from", "django.http import HttpRequest from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens from", "from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens from django.template.context_processors import csrf", "test_force_token_to_string(self): request = HttpRequest() test_token = '<KEY>' request.META['CSRF_COOKIE'] = test_token" ]
[ "rv ^= y return rv def __rxor__(self, y): rv =", "got %d\" % len(args)) for k, v in kwargs.iteritems(): k", "def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is a reserved name\" %", "(container validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): #", "wasset = (k in self) oldval = (self[k] if wasset", "ec, ei, tb = sys.exc_info() try: for k in remvals:", "in enumerate(y)]) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self).pop() except Exception, e: print(\"das.types.Sequence.append: Failed to recover sequence data", "rv def __rand__(self, y): return self.__and__(y) def __isub__(self, y): oldvals", "# -> no need to check if _k was a", "for some other purpose if len(self.symmetric_difference(oth)) == 0: return 0", "ei, tb def _get_alias(self, k): st = self._get_schema_type() if st", "+= \": %s required\" % required_version else: fullmsg += \":", "None: # if isinstance(st[k], das.schematypes.Deprecated): # message = (\"[das] Field", "return rv def __rand__(self, y): return self.__and__(y) def __isub__(self, y):", "__mul__(self, n): rv = self[:] rv.__imul__(n) return rv def __rmul__(self,", "e: print(\"das.types.Sequence.insert: Failed to recover sequence data (%s)\" % e)", "self._is_global_validation_enabled(): # Skip global validaton return gvcb = self._get_validate_globally_cb() if", "return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k = self._adapt_key(k)", "self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence,", "self).append(rv) except Exception, e: print(\"das.types.Sequence.pop: Failed to recover sequence data", "super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))", "index=0) # return super(Sequence, 
self).__contains__(_v) # except: # return False", "raise TypeError(\"setdefault expected at most 2 arguments, got %d\" %", "tb def itervalues(self): for v in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self,", "def __contains__(self, k): # try: # _k = self._adapt_key(k) #", "schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] =", "IndexError(\"list index out of range\") else: return ii else: return", "k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) for k,", "self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) self._gvalidate() except: ec, ei,", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(idx, item) except", "print(\"das.types.Set.__ixor__: Failed to recover set data (%s)\" % e) raise", "if k was a valid key (otherwise __delitem__(k) would fail)", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: if wasset:", "= super(Set, self).pop() try: self._gvalidate() except: ec, ei, tb =", "tb = sys.exc_info() ei = das.ValidationError(\"Global Validation Failed (%s)\" %", "_wrap_index(self, i, n=None, clamp=False): if i < 0: if n", "e: print(\"das.types.Struct.update: Failed to recover struct data (%s)\" % e)", "itervalues(self): for v in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def", "print(\"das.types.Set.update: Failed to recover set data (%s)\" % e) raise", "y): rv = self.copy() rv -= y return rv def", "super(Sequence, self).__getslice__(i, j) newvals = [self._adapt_value(x, index=i+k) for k, x", "hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): # Skip global validaton return", "ei, tb return self def __xor__(self, y): rv = self.copy()", "= sys.exc_info() try: self._dict.clear() self._dict.update(oldvals) except Exception, e: 
print(\"das.types.Struct.update: Failed", "def __init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs)", "raise ei.__class__, ei, tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self,", "else: k = self._get_alias(k) self._check_reserved(k) wasset = (k in self._dict)", "self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate() except:", "import sys import das import traceback class ReservedNameError(Exception): def __init__(self,", "_itervalues(self): for v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def _values(self):", "self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__:", "= len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)])", "'%s'\" % (k, self.__class__.__name__, k2)) return getattr(self, k2) else: #print(\"Forward", "= i + n if ii < 0: if clamp:", "item = super(Dict, self).popitem() try: self._gvalidate() except: ec, ei, tb", "isinstance(oth, Struct) else oth) def __gt__(self, oth): return self._dict.__gt__(oth._dict if", "-> no need to check if _k was a valid", "rv = self.copy() rv |= y return rv def __ror__(self,", "st = self._get_schema_type() return (key if st is None else", "k in remvals: super(Dict, self).__delitem__(k) for k, v in oldvals.iteritems():", "= (self._dict[k] if wasset else None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try:", "binding if k == \"__class__\": super(Struct, self).__setattr__(k, v) else: k", "in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def values(self): return [x", "if hasattr(a0, \"keys\"): for k in a0.keys(): k = self._get_alias(k)", "def __len__(self): return self._dict.__len__() def __str__(self): return self._dict.__str__() def 
__repr__(self):", "return 0 else: raise IndexError(\"list index out of range\") else:", "= {} remvals = set() if len(args) == 1: a0", "of dict.clear def _clear(self): items = self._dict.items() self._dict.clear() try: self._gvalidate()", "e: print(\"das.types.Sequence.append: Failed to recover sequence data (%s)\" % e)", "ei, tb def __getitem__(self, k): k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self,", "expected at most 1 arguments, got %d\" % len(args)) oldvals", "= sys.exc_info() try: super(Dict, self).update(items) except Exception, e: print(\"das.types.Dict.clear: Failed", "self._dict.clear() try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "inst._gvalidate() return inst def __init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] =", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self), [])", "if isinstance(src, klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod", "None: n = das.get_schema_type_name(st) if n: msg = \"[%s] %s\"", "isinstance(oth, Struct) else oth) def __ge__(self, oth): return self._dict.__ge__(oth._dict if", "validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): # Skip", "gvcb is not None: gvcb() if hasattr(self, \"_validate_globally\"): try: getattr(self,", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Dict,", "v except Exception, e: print(\"das.types.Struct.popitem: Failed to recover struct data", "self).__setslice__(ii, ii, oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover", "print(\"das.types.Sequence.__imul__: Failed to recover sequence data (%s)\" % e) raise", "e): ae = self._adapt_value(e, index=len(self)) if ae in self: return", "at most 1 arguments, got %d\" % len(args)) oldvals =", "% e) raise ec, ei, tb return retval # Override", "k, v 
= self._dict.popitem() try: self._gvalidate() except: ec, ei, tb", "current_version=None, required_version=None): fullmsg = \"ersion error\" if required_version: fullmsg +=", "Exception, e: print(\"das.types.Set.pop: Failed to recover set data (%s)\" %", "in self: if not k in oldvals: oldvals[k] = self[k]", "oldvals = super(Set, self).copy() super(Set, self).clear() try: self._gvalidate() except: ec,", "rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self, value, key=None, index=None): return das.adapt_value(value,", "isinstance(oth, Struct) else oth) def __eq__(self, oth): return self._dict.__eq__(oth._dict if", "index=len(self)) if ae in self: return super(Set, self).add(ae) try: self._gvalidate()", "self.data def __exit__(self, type, value, traceback): if self.oldstate is not", "tb return self def __xor__(self, y): rv = self.copy() rv", "def append(self, y): n = len(self) super(Sequence, self).append(self._adapt_value(y, index=n)) try:", "aliasname is not None: # if isinstance(st[k], das.schematypes.Deprecated): # message", "in self: return super(Set, self).add(ae) try: self._gvalidate() except: ec, ei,", "(%s)\" % e) raise ec, ei, tb # Override of", "self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__: Failed to recover set data", "def __add__(self, y): raise das.ValidationError(\"Expected a tuple of size %d,", "tb def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self,", "v in a0: k = self._adapt_key(k) if k in self:", "not reach here # as dict is actually unchanged #", "return self.__mul__(n) def __iadd__(self, y): n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x,", "return self._wrap(self) def add(self, e): ae = self._adapt_value(e, index=len(self)) if", "super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self, i, y): super(Sequence, self).insert(i, self._adapt_value(y,", "rv ^= y return rv 
def __cmp__(self, oth): # base", "required_version: fullmsg += \": %s required\" % required_version else: fullmsg", "self).extend(newvals) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__:", "except Exception, e: print(\"das.types.Struct.__setitem__: Failed to recover struct data (%s)\"", "# Override of dict.update def _update(self, *args, **kwargs): if len(args)", "print(\"das.types.Sequence.__iadd__: Failed to recover sequence data (%s)\" % e) raise", "rv = super(Sequence, self).pop(*args) try: self._gvalidate() except: ec, ei, tb", "__init__(self, msg=None, current_version=None, required_version=None): fullmsg = \"ersion error\" if required_version:", "% e) raise ec, ei, tb def __iter__(self): for item", "print(\"das.types.Struct.__delitem__: Failed to recover struct data (%s)\" % e) raise", "# don't need to create forwarding attribute (set __getattr__) return", "i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def __getitem__(self, i):", "index=i) for i, x in enumerate(y)])) try: self._gvalidate() except: ec,", "dict.update def _update(self, *args, **kwargs): if len(args) > 1: raise", "Field %s is deprecated, use %s instead\" % (repr(k), repr(aliasname)))", "_, ei, tb = sys.exc_info() ei = das.ValidationError(\"Global Validation Failed", "self).__setslice__(i, j, newvals) try: self._gvalidate() except: ec, ei, tb =", "yield TypeBase.TransferGlobalValidator(self, v) def values(self): return [x for x in", "self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs) def __getattr__(self, k): try: k", "sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__:", "here only if k was a valid key (otherwise __delitem__(k)", "if schema_type is None: schema_type = self._get_schema_type() if schema_type is", 
"def _get_alias(self, k): st = self._get_schema_type() if st is not", "tb return self def __mul__(self, n): rv = self[:] rv.__imul__(n)", "# return super(Dict, self).__contains__(_k) # except: # return False def", "tuple is immutable? super(Tuple, self).__init__() def __add__(self, y): raise das.ValidationError(\"Expected", "for k, v in kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k) self._dict[k]", "return dst @classmethod def ValidateGlobally(klass, inst): if isinstance(inst, klass): inst._gvalidate()", "data (%s)\" % e) raise ec, ei, tb def __getslice__(self,", "ei, tb def pop(self, *args): rv = super(Sequence, self).pop(*args) try:", "to enable dynamic function set binding if k == \"__class__\":", "dict.pop def _pop(self, k, *args): _k = k k =", "items(self): return [x for x in self.iteritems()] class Struct(TypeBase): def", "method, tuple is already created # Maybe because tuple is", "__init__(self, *args): # Funny, we need to declare *args here,", "nargs) if nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0]))", "\"__class__\": super(Struct, self).__setattr__(k, v) else: k = self._get_alias(k) self._check_reserved(k) wasset", "ei, tb def extend(self, y): newvals = [self._adapt_value(x, index=len(self)+i) for", "ec, ei, tb = sys.exc_info() try: self._dict.clear() self._dict.update(oldvals) except Exception,", "except: ec, ei, tb = sys.exc_info() try: super(Set, self).__ior__(oldvals) except", "if isinstance(oth, Struct) else oth) def __gt__(self, oth): return self._dict.__gt__(oth._dict", "extend(self, y): newvals = [self._adapt_value(x, index=len(self)+i) for i, x in", "except: ec, ei, tb = sys.exc_info() try: self._dict[k] = v", "def __ge__(self, oth): return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth)", "TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def __delitem__(self, i): ii = self._wrap_index(i, clamp=False)", "the method, tuple is already created # Maybe because 
tuple", "_get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb def", "try: # _k = self._adapt_key(k) # return super(Dict, self).__contains__(_k) #", "self).pop() try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "Exception, e: print(\"das.types.Sequence.remove: Failed to recover sequence data (%s)\" %", "\"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): # Skip global validaton return gvcb", "def __rand__(self, y): return self.__and__(y) def __isub__(self, y): oldvals =", "**kwargs) def _adapt_key(self, key): st = self._get_schema_type() return (key if", "return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth) def __eq__(self, oth):", "got %d\" % nargs) if nargs >= 1: self._check_reserved(args[0]) if", "not None: self.data._enable_global_validation(self.oldstate) self.oldstate = None # Always re-raise exception", "return rv def __ror__(self, y): return self.__or__(y) def __ixor__(self, y):", "try: super(Dict, self).update(items) except Exception, e: print(\"das.types.Dict.clear: Failed to recover", "__delattr__(self, k): k = self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k)", "+ k if hasattr(self, k2): # don't need to create", "e: print(\"das.types.Dict.__setitem__: Failed to recover dict data (%s)\" % e)", "ei, tb def __delitem__(self, k): _k = k k =", "super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv) else: super(Sequence, self).append(rv) except Exception,", "ei, tb def append(self, y): n = len(self) super(Sequence, self).append(self._adapt_value(y,", "self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] = True", "dict data (%s)\" % e) raise ec, ei, tb return", "> 2: raise TypeError(\"setdefault expected at most 2 arguments, 
got", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() # Note:", "# Override of dict.copy def _copy(self): return self._wrap(self) # Override", "if wasset else None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate()", "i, x in enumerate(y)]) try: self._gvalidate() except: ec, ei, tb", "schema_type=None): if schema_type is None: schema_type = self._get_schema_type() if schema_type", "super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals = super(Set,", "super(Dict, self).update(items) except Exception, e: print(\"das.types.Dict.clear: Failed to recover dict", "self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override of dict.update def _update(self, *args,", "tb = sys.exc_info() try: self._dict.clear() self._dict.update(oldvals) except Exception, e: print(\"das.types.Struct.update:", "super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear: Failed to recover set", "of dict.setdefault def _setdefault(self, *args): nargs = len(args) if nargs", "super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__: Failed to recover set", "else None) self._dict[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec,", "e: print(\"das.types.Struct.__setitem__: Failed to recover struct data (%s)\" % e)", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(_k,", "# the core of the method, tuple is already created", "elif hasattr(self._dict, k): k2 = \"_\" + k if hasattr(self,", "self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__: Failed to recover", "def _check_reserved(self, k): if hasattr(self.__class__, k): raise ReservedNameError(k) elif hasattr(self._dict,", "to recover dict data (%s)\" % e) raise ec, ei,", "except Exception, e: print(\"das.types.Dict.clear: Failed to recover dict data (%s)\"", "k): # try: # _k 
= self._adapt_key(k) # return super(Dict,", "self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.pop: Failed to recover", "# but we need it for some other purpose if", "super(Sequence, self).__setslice__(oldlen, len(self), []) except Exception, e: print(\"das.types.Sequence.__imul__: Failed to", "self._adapt_key(k) if k in self: oldvals[k] = self[k] else: remvals.add(k)", "%s)\" % (k, \"has\" if hasattr(self._dict, k) else \"hasn't\")) return", "is None else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k, v): k", "return item def clear(self): items = super(Dict, self).items() super(Dict, self).clear()", "want to modify for # to enable dynamic function set", "= self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k], key=k) else: for k,", "y): rv = self[:] rv.__iadd__(y) return rv def __setitem__(self, i,", "self._dict[k] = self._adapt_value(v, key=k) for k, v in kwargs.iteritems(): k", "x in enumerate(y)]) try: self._gvalidate() except: ec, ei, tb =", "__cmp__(self, oth): # base set class doesn't implement __cmp__ #", "ec, ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(item[0], item[1]) except", "print(\"das.types.Struct.__delattr__: Failed to recover struct data (%s)\" % e) raise", "k): # Look for an override method of the same", "e) raise ec, ei, tb def pop(self, k, *args): _k", "raise ec, ei, tb def append(self, y): n = len(self)", "self._dict[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei, tb", "self).__setslice__(ii, ii+len(newvals), oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover", "super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__: Failed to", "self: continue super(Set, self).add(item) added.add(item) try: self._gvalidate() except: ec, ei,", "i, y): super(Sequence, self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate() except: ec,", "raise 
ReservedNameError(k) elif hasattr(self._dict, k): k2 = \"_\" + k", "idx = self.index(y) item = self[idx] super(Sequence, self).remove(item) try: self._gvalidate()", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: for item", "raise ec, ei, tb return self def __add__(self, y): rv", "__init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] = None", "(n, msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k) def ordered_keys(self): return", "oldval = (self[k] if wasset else None) super(Dict, self).__setitem__(k, self._adapt_value(v,", "self._adapt_value(a0[k], key=k) else: for k, v in a0: k =", "e) raise ec, ei, tb def pop(self, *args): rv =", "(%s)\" % e) raise ec, ei, tb def pop(self, *args):", "ae = self._adapt_value(e, index=len(self)) if ae in self: return super(Set,", "self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def items(self): return [x for", "in kwargs.iteritems(): k = self._adapt_key(k) if k in self: if", "tb class Set(TypeBase, set): def __init__(self, args): TypeBase.__init__(self) set.__init__(self, args)", "prefixed by '_' in current class k2 = '_' +", "def __str__(self): return self._dict.__str__() def __repr__(self): return self._dict.__repr__() # Override", "for k in remvals: super(Dict, self).__delitem__(k) for k, v in", "except Exception, e: print(\"das.types.Struct.update: Failed to recover struct data (%s)\"", "of range\") else: return ii else: return i def __imul__(self,", "TypeError(\"setdefault expected at most 2 arguments, got %d\" % nargs)", "self).__setattr__(k, v) else: k = self._get_alias(k) self._check_reserved(k) wasset = (k", "sys.exc_info() try: self._dict.clear() self._dict.update(oldvals) except Exception, e: print(\"das.types.Struct.update: Failed to", "= self._adapt_key(k) wasset = (k in self) oldval = (self[k]", "TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals = super(Set, 
self).copy() super(Set, self).clear()", "not None: # if isinstance(st[k], das.schematypes.Deprecated): # message = (\"[das]", "= (self[k] if wasset else None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k))", "only if k was a valid key (otherwise __delitem__(k) would", "of size %d, got %d\" % (len(self), len(self) + len(y)))", "k, TypeBase.TransferGlobalValidator(self, v) def items(self): return [x for x in", "ei, tb def __getslice__(self, i, j): return self._wrap(super(Sequence, self).__getslice__(i, j))", "self: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k)", "data (%s)\" % e) raise ec, ei, tb def __delitem__(self,", "return self._dict.__iter__() def __len__(self): return self._dict.__len__() def __str__(self): return self._dict.__str__()", "in enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals) try: self._gvalidate() except: ec,", "use '_%s(...)' to call it instead\" % (type(self).__name__, k, k,", "try: super(Sequence, self).__setslice__(n, len(self), []) except Exception, e: print(\"das.types.Sequence.__iadd__: Failed", "tb = sys.exc_info() try: self._dict[k] = v except Exception, e:", "fullmsg += \": %s required\" % required_version else: fullmsg +=", "= [self._adapt_value(x, index=i) for i, x in enumerate(y)] for item", "oth) def __eq__(self, oth): return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else", "def pop(self): item = super(Set, self).pop() try: self._gvalidate() except: ec,", "ec, ei, tb return item def difference(self, rhs): return self.__sub__(rhs)", "k): raise ReservedNameError(k) else: msg = \"[das] %s's '%s(...)' method", "% e) raise ec, ei, tb def itervalues(self): for v", "def __getattr__(self, k): try: k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k])", "__enter__(self): try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return self.data", "ec, ei, tb = sys.exc_info() 
try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True))", "Exception, e: print(\"das.types.Set.__ior__: Failed to recover set data (%s)\" %", "except Exception, e: print(\"das.types.Struct.popitem: Failed to recover struct data (%s)\"", "re-raise exception return False class TypeBase(object): @classmethod def TransferGlobalValidator(klass, src,", "clamp=False): if i < 0: if n is None: n", "TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k = self._adapt_key(k) _v", "= self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate() except:", "class doesn't implement __cmp__ # but we need it for", "Exception, e: print(\"das.types.Sequence.append: Failed to recover sequence data (%s)\" %", "self.data = data self.oldstate = None def __enter__(self): try: self.oldstate", "we can reach here only if k was a valid", "other purpose if len(self.symmetric_difference(oth)) == 0: return 0 elif len(self)", "schema_type is not None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st =", "n is None: n = len(self) ii = i +", "except: ec, ei, tb = sys.exc_info() try: # if _k", "to declare *args here, but at the time we reach", "super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to", "super(Sequence, self).insert(ii, item) except Exception, e: print(\"das.types.Sequence.__delitem__: Failed to recover", "dst @classmethod def ValidateGlobally(klass, inst): if isinstance(inst, klass): inst._gvalidate() return", "(args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override of dict.update def _update(self,", "in self, self._get_schema_type().ordered_keys()) def _itervalues(self): for v in self._dict.itervalues(): yield", "return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth) def __ge__(self, oth):", 
"ei, tb return self def __or__(self, y): rv = self.copy()", "self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate() except: ec, ei, tb =", "super(ReservedNameError, self).__init__(\"'%s' is a reserved name\" % name) class VersionError(Exception):", "def __delitem__(self, i): ii = self._wrap_index(i, clamp=False) item = super(Sequence,", "% len(args)) oldvals = self._dict.copy() try: if len(args) == 1:", "ei, tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"]", "Override of dict.has_key def _has_key(self, k): return self._dict.has_key(self._get_alias(k)) # Override", "ei, tb = sys.exc_info() try: super(Sequence, self).pop() except Exception, e:", "TypeBase.TransferGlobalValidator(self, v) def _items(self): return [x for x in self.iteritems()]", "self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k = self._adapt_key(k) _v = super(Dict,", "except KeyError: if hasattr(self._dict, k): # Look for an override", "key): st = self._get_schema_type() return (key if st is None", "x in enumerate(y)])) try: self._gvalidate() except: ec, ei, tb =", "== 2: args = (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override", "self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self.iteritems()] class Struct(TypeBase): def __init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] =", "__contains__(self, k): # try: # _k = self._adapt_key(k) # return", "e) raise ec, ei, tb return item def clear(self): items", "except: ec, ei, tb = sys.exc_info() try: self._dict.clear() self._dict.update(oldvals) except", "%s required\" % required_version else: fullmsg += \": no requirements\"", "return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase,", "\"has\" if 
hasattr(self._dict, k) else \"hasn't\")) return self.__getattribute__(k) def __setattr__(self,", "except: ec, ei, tb = sys.exc_info() try: self._dict[k] = oldval", "try: self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear: Failed to recover struct", "% (len(self), len(self) + len(y))) def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self,", "k): try: k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError:", "print(\"das.types.Set.pop: Failed to recover set data (%s)\" % e) raise", "'%s', use '_%s(...)' to call it instead\" % (type(self).__name__, k,", "e: print(\"das.types.Sequence.extend: Failed to recover sequence data (%s)\" % e)", "= super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in", "__getattr__) return if k2 in self.__dict__: if self.__dict__[k2] != getattr(self._dict,", "for i, x in enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate() except:", "struct data (%s)\" % e) raise ec, ei, tb def", "struct data (%s)\" % e) raise ec, ei, tb return", "rv def _adapt_value(self, value, key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key,", "try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception, e: print(\"das.types.Sequence.insert: Failed", "ei, tb = sys.exc_info() try: self._dict[k] = v except Exception,", "self.__dict__: if self.__dict__[k2] != getattr(self._dict, k): raise ReservedNameError(k) else: msg", "tb # def __contains__(self, y): # try: # _v =", "0: return 0 elif len(self) <= len(oth): return -1 else:", "self.data._enable_global_validation(self.oldstate) self.oldstate = None # Always re-raise exception return False", "k in self: oldvals[k] = self[k] else: remvals.add(k) self[k] =", "__init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self, i, n=None, clamp=False):", "tb return 
self def __or__(self, y): rv = self.copy() rv", "# Override of dict.has_key def _has_key(self, k): return self._dict.has_key(self._get_alias(k)) #", "except: ec, ei, tb = sys.exc_info() try: for item in", "return self.__sub__(rhs) def union(self, rhs): return self.__or__(rhs) def intersection(self, rhs):", "we need it for some other purpose if len(self.symmetric_difference(oth)) ==", "self._dict[k] = self._adapt_value(v, key=k) self._gvalidate() except: ec, ei, tb =", "e: print(\"das.types.Set.clear: Failed to recover set data (%s)\" % e)", "use\" % current_version else: fullmsg += \", no version info\"", "field '%s', use '_%s(...)' to call it instead\" % (type(self).__name__,", "= k k = self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k)", "data (%s)\" % e) raise ec, ei, tb return retval", "item[1]) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict data", "# def __contains__(self, y): # try: # _v = self._adapt_value(y,", "self.__class__.__name__, k2)) return getattr(self, k2) else: #print(\"Forward '%s' to dict", "k): st = self._get_schema_type() if st is not None and", "e) raise ec, ei, tb class Set(TypeBase, set): def __init__(self,", "yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i, j, y): oldvals =", "__iter__(self): for item in super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def", "e: print(\"das.types.Sequence.remove: Failed to recover sequence data (%s)\" % e)", "getattr(self._dict, k) def ordered_keys(self): return filter(lambda x: x in self,", "# try: # _k = self._adapt_key(k) # return super(Dict, self).__contains__(_k)", "ec, ei, tb # def __contains__(self, k): # try: #", "self._adapt_key(k) _v = super(Dict, self).pop(_k, *args) try: self._gvalidate() except: ec,", "tb def append(self, y): n = len(self) super(Sequence, self).append(self._adapt_value(y, index=n))", "**kwargs) def __getattr__(self, k): try: k = self._get_alias(k) return 
TypeBase.TransferGlobalValidator(self,", "TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def _adapt_key(self, key): st = self._get_schema_type()", "method conflicts with data field '%s', use '_%s(...)' to call", "def _values(self): return [x for x in self.itervalues()] def _iteritems(self):", "most 2 arguments, got %d\" % nargs) if nargs >=", "Exception, e: print(\"das.types.Struct.clear: Failed to recover struct data (%s)\" %", "% e) raise ec, ei, tb class Set(TypeBase, set): def", "self).__delitem__(k) for k, v in oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except", "len(self), []) except Exception, e: print(\"das.types.Sequence.__iadd__: Failed to recover sequence", "def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class Sequence(TypeBase, list):", "self._get_alias(k) oldval = self._dict.get(k, None) retval = self._dict.pop(k, *args) try:", "base set class doesn't implement __cmp__ # but we need", "self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return self.data def __exit__(self,", "ec, ei, tb = sys.exc_info() try: self._dict[k] = oldval except", "%d\" % nargs) if nargs >= 1: self._check_reserved(args[0]) if nargs", "ae in self: return super(Set, self).add(ae) try: self._gvalidate() except: ec,", "ec, ei, tb = sys.exc_info() try: if wasset: self._dict[k] =", "n: msg = \"[%s] %s\" % (n, msg) das.print_once(msg) self.__dict__[k2]", "self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self): return", "else: remvals.add(k) self[k] = self._adapt_value(v, key=k) elif len(args) > 1:", "Funny, we need to declare *args here, but at the", "self).__setslice__(oldlen, len(self), []) except Exception, e: print(\"das.types.Sequence.__imul__: Failed to recover", "ec, ei, tb = sys.exc_info() try: if wasset: super(Dict, 
self).__setitem__(k,", "_v) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict data", "e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence data (%s)\" % e)", "super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover", "getattr(self._dict, k) else: #raise AttributeError(\"'Struct' has no attribute '%s' (dict", "super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate() except: ec, ei, tb", "sys.exc_info() try: super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), []) except Exception,", "k == \"__class__\": super(Struct, self).__setattr__(k, v) else: k = self._get_alias(k)", "else: super(Sequence, self).append(rv) except Exception, e: print(\"das.types.Sequence.pop: Failed to recover", "ei, tb # Override of dict.copy def _copy(self): return self._wrap(self)", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Set,", "Exception, e: print(\"das.types.Struct.pop: Failed to recover struct data (%s)\" %", "super(Dict, self).clear() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "for x in self.itervalues()] def _iteritems(self): for k, v in", "if n is None: n = len(self) ii = i", "except: ec, ei, tb = sys.exc_info() try: super(Set, self).clear() super(Set,", "need to create forwarding attribute (set __getattr__) return if k2", "sys.exc_info() try: super(Dict, self).__setitem__(item[0], item[1]) except Exception, e: print(\"das.types.Dict.popitem: Failed", "return self def __add__(self, y): rv = self[:] rv.__iadd__(y) return", "% e) raise ec, ei, tb # def __contains__(self, y):", "self).add(item) except Exception, e: print(\"das.types.Set.pop: Failed to recover set data", "dst): if isinstance(src, klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst", "if wasset: super(Dict, self).__setitem__(k, oldval) else: del(self[k]) except Exception, e:", 
"self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate() except: ec, ei, tb =", "wasset: self._dict[k] = oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setattr__:", "else oth) def __lt__(self, oth): return self._dict.__lt__(oth._dict if isinstance(oth, Struct)", "self._adapt_value(y, index=0) # return super(Sequence, self).__contains__(_v) # except: # return", "hasattr(a0, \"keys\"): for k in a0.keys(): k = self._get_alias(k) self._check_reserved(k)", "return rv def __cmp__(self, oth): # base set class doesn't", "tb def __contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return", "isinstance(oth, Struct) else oth) def __lt__(self, oth): return self._dict.__lt__(oth._dict if", "TypeBase.TransferGlobalValidator(self, v) def _values(self): return [x for x in self.itervalues()]", "st = self._get_schema_type() if st is not None: # run", "TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class Sequence(TypeBase, list): def __init__(self, *args): TypeBase.__init__(self)", "yield k, TypeBase.TransferGlobalValidator(self, v) def _items(self): return [x for x", "ec, ei, tb # def __contains__(self, y): # try: #", "def __ixor__(self, y): oldvals = super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i)", "self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def copy(self): return self._wrap(self) def update(self,", "raise ec, ei, tb return rv def remove(self, y): idx", "__init__(self, args): TypeBase.__init__(self) set.__init__(self, args) def __iand__(self, y): oldvals =", "\"[%s] %s\" % (n, msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k)", "# base set class doesn't implement __cmp__ # but we", "self._check_reserved(k) wasset = (k in self._dict) oldval = (self._dict[k] if", "das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k) def ordered_keys(self): return filter(lambda x:", 
"Special case for __class__ member that we may want to", "ec, ei, tb return self def __sub__(self, y): rv =", "tb return item def difference(self, rhs): return self.__sub__(rhs) def union(self,", "% e) raise ec, ei, tb return item def clear(self):", "print(\"das.types.Dict.__setitem__: Failed to recover dict data (%s)\" % e) raise", "index=i+k) for k, x in enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals)", "self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate() except: ec, ei, tb =", "def __iter__(self): for item in super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item)", "wasset: super(Dict, self).__setitem__(k, oldval) else: del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__:", "_set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def", "sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self), []) except Exception, e: print(\"das.types.Sequence.__iadd__:", "def __mul__(self, n): rv = self[:] rv.__imul__(n) return rv def", "rv |= y return rv def __ror__(self, y): return self.__or__(y)", "__imul__(self, n): oldlen = len(self) super(Sequence, self).__imul__(n) try: self._gvalidate() except:", "sys.exc_info() try: self._dict[k] = v except Exception, e: print(\"das.types.Struct.popitem: Failed", "0 else: raise IndexError(\"list index out of range\") else: return", "k = self._get_alias(k) oldval = self._dict.get(k, None) retval = self._dict.pop(k,", "= self.__class__(rhs if st is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return", "super(Dict, self).__delitem__(k) for k, v in oldvals.iteritems(): super(Dict, self).__setitem__(k, v)", "v) def items(self): return [x for x in self.iteritems()] class", "try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__: Failed", 
"super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate()", "self._get_validate_globally_cb() if gvcb is not None: gvcb() if hasattr(self, \"_validate_globally\"):", "if required_version: fullmsg += \": %s required\" % required_version else:", "except: ec, ei, tb = sys.exc_info() try: super(Set, self).remove(ae) except", "tb def __getitem__(self, k): k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k))", "def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"]", "cb): self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self,", "Exception, e: print(\"das.types.Struct.update: Failed to recover struct data (%s)\" %", "__ior__(self, y): oldvals = super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for", "repr(aliasname))) # das.print_once(message) return aliasname return k def _check_reserved(self, k):", "sys.exc_info() try: super(Sequence, self).insert(ii, item) except Exception, e: print(\"das.types.Sequence.__delitem__: Failed", "ei, tb def itervalues(self): for v in super(Dict, self).itervalues(): yield", "tb def __getslice__(self, i, j): return self._wrap(super(Sequence, self).__getslice__(i, j)) def", "= data self.oldstate = None def __enter__(self): try: self.oldstate =", "len(self), []) except Exception, e: print(\"das.types.Sequence.extend: Failed to recover sequence", "in oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except Exception, e: print(\"das.types.Dict.update: Failed", "schema_type=self._get_schema_type(), key=key, index=index) def _validate(self, schema_type=None): if schema_type is None:", "at most 1 arguments, got %d\" % len(args)) for k,", "(dict %s)\" % (k, \"has\" if 
hasattr(self._dict, k) else \"hasn't\"))", "st is not None: n = das.get_schema_type_name(st) if n: msg", "% e) raise ec, ei, tb return _v def popitem(self):", "**kwargs): oldvals = {} remvals = set() if len(args) ==", "_k i not defined but a default value is provided,", "% nargs) if nargs >= 1: self._check_reserved(args[0]) if nargs ==", "rv def __rxor__(self, y): rv = self.copy() rv ^= y", "was a valid key (otherwise __delitem__(k) would fail) try: self._dict[k]", "pop(self): item = super(Set, self).pop() try: self._gvalidate() except: ec, ei,", "else: return 1 def __iter__(self): for item in super(Set, self).__iter__():", "list.__init__(self, *args) def _wrap_index(self, i, n=None, clamp=False): if i <", "ec, ei, tb def append(self, y): n = len(self) super(Sequence,", "raise ec, ei, tb # def __contains__(self, k): # try:", "_k = self._adapt_key(k) _v = super(Dict, self).pop(_k, *args) try: self._gvalidate()", "return self def __or__(self, y): rv = self.copy() rv |=", "self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate() except:", "self def __add__(self, y): rv = self[:] rv.__iadd__(y) return rv", "for x in self.itervalues()] def iteritems(self): for k, v in", "= None self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self, rhs): st =", "rv = self.copy() rv -= y return rv def __rsub__(self,", "_check_reserved(self, k): if hasattr(self.__class__, k): raise ReservedNameError(k) elif hasattr(self._dict, k):", "-1 else: return 1 def __iter__(self): for item in super(Set,", "-= y return rv def __rsub__(self, y): return self.__sub__(y) def", "= sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self), []) except Exception, e:", "ei, tb def copy(self): return self._wrap(self) def add(self, e): ae", "= self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei, tb =", "super(Sequence, self).__setslice__(n, len(self), []) except Exception, e: print(\"das.types.Sequence.__iadd__: Failed to", "# 
das.print_once(message) return aliasname return k def _check_reserved(self, k): if", "schema_type=st.ktype)) def __setitem__(self, k, v): k = self._adapt_key(k) wasset =", "self._dict.__repr__() # Override of dict.has_key def _has_key(self, k): return self._dict.has_key(self._get_alias(k))", "raise ec, ei, tb return self def __mul__(self, n): rv", "Exception, e: print(\"das.types.Set.__ixor__: Failed to recover set data (%s)\" %", "for k, v in a0: k = self._get_alias(k) self._check_reserved(k) self._dict[k]", "range\") else: return ii else: return i def __imul__(self, n):", "try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__: Failed", "self._adapt_value(v, key=k) elif len(args) > 1: raise Exception(\"update expected at", "if st is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def", "wasset: self._dict[k] = oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__:", "for k in a0.keys(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] =", "ei, tb def __delattr__(self, k): k = self._get_alias(k) oldval =", "call it instead\" % (type(self).__name__, k, k, k) st =", "in self: continue super(Set, self).add(item) added.add(item) try: self._gvalidate() except: ec,", "%d, got %d\" % (len(self), len(self) + len(y))) def __getitem__(self,", "super(Set, self).add(item) added.add(item) try: self._gvalidate() except: ec, ei, tb =", "ei, tb return item def clear(self): items = super(Dict, self).items()", "function set binding if k == \"__class__\": super(Struct, self).__setattr__(k, v)", "set data (%s)\" % e) raise ec, ei, tb return", "= self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) for k, v", "print(\"das.types.Struct.__setattr__: Failed to recover struct data (%s)\" % e) raise", "i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def 
__delitem__(self, i): ii =", "import das import traceback class ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError,", "\"ersion error\" if required_version: fullmsg += \": %s required\" %", "schema_type = self._get_schema_type() if schema_type is not None: schema_type.validate(self) self._set_schema_type(schema_type)", "% (k, k)) return getattr(self._dict, k) else: #raise AttributeError(\"'Struct' has", "of dict.pop def _pop(self, k, *args): _k = k k", "(self[k] if wasset else None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try:", "# as dict is actually unchanged # -> no need", "def extend(self, y): newvals = [self._adapt_value(x, index=len(self)+i) for i, x", "sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__:", "if self.__dict__[k2] != getattr(self._dict, k): raise ReservedNameError(k) else: msg =", "key=args[0])) super(Dict, self).setdefault(*args) def copy(self): return self._wrap(self) def update(self, *args,", "self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k], key=k) else: for k, v in", "if self.oldstate is not None: self.data._enable_global_validation(self.oldstate) self.oldstate = None #", "= super(Dict, self).popitem() try: self._gvalidate() except: ec, ei, tb =", "if hasattr(self, k2): #print(\"Forward '%s' to %s class '%s'\" %", "n = das.get_schema_type_name(st) if n: msg = \"[%s] %s\" %", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict.update(items)", "def __getslice__(self, i, j): return self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self,", "for k, v in super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v)", "None def __enter__(self): try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass", "True def _wrap(self, rhs): st = self._get_schema_type() rv = 
self.__class__(rhs", "try: super(Sequence, self).insert(idx, item) except Exception, e: print(\"das.types.Sequence.remove: Failed to", "super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data", "= sys.exc_info() try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.pop:", "= None def __enter__(self): try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except:", "oldlen = len(self) super(Sequence, self).__imul__(n) try: self._gvalidate() except: ec, ei,", "should not reach here # as dict is actually unchanged", "super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k = self._adapt_key(k) _v =", "clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed", "__getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k", "\", no version info\" if msg: fullmsg = msg +", "data (%s)\" % e) raise ec, ei, tb return _v", "j): oldvals = super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j) try:", "None) self._dict.__delitem__(k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "tb = sys.exc_info() try: super(Dict, self).__setitem__(_k, _v) except Exception, e:", "self).copy() super(Set, self).clear() try: self._gvalidate() except: ec, ei, tb =", "if hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except: _, ei, tb", "_popitem(self): k, v = self._dict.popitem() try: self._gvalidate() except: ec, ei,", "msg = \"[%s] %s\" % (n, msg) das.print_once(msg) self.__dict__[k2] =", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1,", "item in super(Set, self).__iter__(): yield 
TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals", "def __delslice__(self, i, j): oldvals = super(Sequence, self).__getslice__(i, j) super(Sequence,", "# _k = self._adapt_key(k) # return super(Dict, self).__contains__(_k) # except:", "if k in self: if not k in oldvals: oldvals[k]", "2: raise TypeError(\"setdefault expected at most 2 arguments, got %d\"", "= sys.exc_info() try: self._dict[k] = v except Exception, e: print(\"das.types.Struct.popitem:", "self._wrap_index(i, clamp=False) item = super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate()", "k was a valid key (otherwise __delitem__(k) would fail) try:", "return [x for x in self.iteritems()] class Struct(TypeBase): def __init__(self,", "except Exception, e: print(\"das.types.Dict.update: Failed to recover dict data (%s)\"", "is actually unchanged # -> no need to check if", "x in self.iteritems()] class Struct(TypeBase): def __init__(self, *args, **kwargs): TypeBase.__init__(self)", "if hasattr(self.__class__, k): raise ReservedNameError(k) elif hasattr(self._dict, k): k2 =", "super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j) try: self._gvalidate() except: ec,", "k = self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate()", "self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "except: # return False def setdefault(self, *args): nargs = len(args)", "% required_version else: fullmsg += \": no requirements\" if current_version:", "newvals = [self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)] super(Sequence,", "return self def __mul__(self, n): rv = self[:] rv.__imul__(n) return", "self._dict.update(oldvals) except Exception, e: print(\"das.types.Struct.update: Failed to recover struct data", "self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth) def __ge__(self, oth): return", "oth): # base set class doesn't 
implement __cmp__ # but", "= args[0] if hasattr(a0, \"keys\"): for k in a0.keys(): k", "super(Dict, self).setdefault(*args) def copy(self): return self._wrap(self) def update(self, *args, **kwargs):", "_setdefault(self, *args): nargs = len(args) if nargs > 2: raise", "def __enter__(self): try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return", "class '%s'\" % (k, self.__class__.__name__, k2)) return getattr(self, k2) else:", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: ii", "was a valid key super(Dict, self).__setitem__(_k, _v) except Exception, e:", "(self._dict[k] if wasset else None) self._dict[k] = self._adapt_value(v, key=k) try:", "self[k] else: remvals.add(k) self[k] = self._adapt_value(a0[k], key=k) else: for k,", "self, self._get_schema_type().ordered_keys()) def _itervalues(self): for v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self,", "= len(self) super(Sequence, self).__imul__(n) try: self._gvalidate() except: ec, ei, tb", "self).index(self._adapt_value(y, index=0)) def insert(self, i, y): super(Sequence, self).insert(i, self._adapt_value(y, index=i))", "super(Sequence, self).remove(item) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "y): # try: # _v = self._adapt_value(y, index=0) # return", "enumerate(y)]) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "data (%s)\" % e) raise ec, ei, tb def __contains__(self,", "oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) elif", "create forwarding attribute (set __getattr__) return if k2 in self.__dict__:", "= self._dict.pop(k, *args) try: self._gvalidate() except: ec, ei, tb =", "return rv def __rxor__(self, y): rv = self.copy() rv ^=", "print(\"das.types.Sequence.extend: Failed to recover sequence data (%s)\" % e) raise", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(ii,", "item def 
clear(self): items = super(Dict, self).items() super(Dict, self).clear() try:", "return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class Sequence(TypeBase, list): def __init__(self, *args):", "st is not None: # run self validation first (container", "return [x for x in self.itervalues()] def iteritems(self): for k,", "= self._adapt_key(k) _v = super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate()", "check if _k was a valid key super(Dict, self).__setitem__(_k, _v)", "try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delattr__: Failed to", "+= \", no version info\" if msg: fullmsg = msg", "False class TypeBase(object): @classmethod def TransferGlobalValidator(klass, src, dst): if isinstance(src,", "__isub__(self, y): oldvals = super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for", "__rsub__(self, y): return self.__sub__(y) def __ior__(self, y): oldvals = super(Set,", "% e) raise ec, ei, tb # Override of dict.clear", "self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear: Failed to recover struct data", "self._dict[k] = v except Exception, e: print(\"das.types.Struct.popitem: Failed to recover", "already created # Maybe because tuple is immutable? 
super(Tuple, self).__init__()", "self._get_schema_type().ordered_keys()) def _itervalues(self): for v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v)", "das.ValidationError(\"Expected a tuple of size %d, got %d\" % (len(self),", "tb def pop(self, k, *args): _k = self._adapt_key(k) _v =", "a valid key (otherwise __delitem__(k) would fail) try: self._dict[k] =", "k): _k = self._adapt_key(k) _v = super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k)", "self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if hasattr(self._dict, k): #", "def __getitem__(self, k): k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def", "i + n if ii < 0: if clamp: return", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: # if", "def __delitem__(self, k): _k = self._adapt_key(k) _v = super(Dict, self).__getitem__(_k)", "n=None, clamp=False): if i < 0: if n is None:", "e: print(\"das.types.Sequence.pop: Failed to recover sequence data (%s)\" % e)", "data (%s)\" % e) raise ec, ei, tb return rv", "return self.__xor__(rhs) class Dict(TypeBase, dict): def __init__(self, *args, **kwargs): TypeBase.__init__(self)", "e) raise ec, ei, tb return self def __xor__(self, y):", "^= y return rv def __cmp__(self, oth): # base set", "if aliasname is not None: # if isinstance(st[k], das.schematypes.Deprecated): #", "ec, ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(_k, _v) except", "*args): # Funny, we need to declare *args here, but", "class '%s'\" % (k, k)) return getattr(self._dict, k) else: #raise", "e: print(\"das.types.Sequence.__imul__: Failed to recover sequence data (%s)\" % e)", "for k, x in enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals) try:", "Struct) else oth) def __iter__(self): return self._dict.__iter__() def __len__(self): return", "data (%s)\" % e) raise ec, ei, tb def __delattr__(self,", "return [x for x in 
self.itervalues()] def _iteritems(self): for k,", "= super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate() except: ec, ei,", "self).__setslice__(len(self) - len(newvals), len(self), []) except Exception, e: print(\"das.types.Sequence.extend: Failed", "Exception, e: print(\"das.types.Struct.popitem: Failed to recover struct data (%s)\" %", "Override of dict.copy def _copy(self): return self._wrap(self) # Override of", "raise ec, ei, tb def __delattr__(self, k): k = self._get_alias(k)", "self).__getslice__(i, j) newvals = [self._adapt_value(x, index=i+k) for k, x in", "% e) raise ec, ei, tb def update(self, *args): added", "y in args: lst = [self._adapt_value(x, index=i) for i, x", "e) raise ec, ei, tb def pop(self): item = super(Set,", "super(Dict, self).__delitem__(_k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "def __init__(self, *args): # Funny, we need to declare *args", "self).items() super(Dict, self).clear() try: self._gvalidate() except: ec, ei, tb =", "i, x in enumerate(y)])) try: self._gvalidate() except: ec, ei, tb", "Always re-raise exception return False class TypeBase(object): @classmethod def TransferGlobalValidator(klass,", "ei, tb = sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence,", "if len(args) > 1: raise Exception(\"update expected at most 1", "False def setdefault(self, *args): nargs = len(args) if nargs >", "= super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate() except: ec, ei,", "= sys.exc_info() try: # if _k i not defined but", "raise ec, ei, tb def __getslice__(self, i, j): return self._wrap(super(Sequence,", "except Exception, e: print(\"das.types.Struct.__setattr__: Failed to recover struct data (%s)\"", "TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k, v): k = self._get_alias(k) self._check_reserved(k)", "# Override of dict.pop def _pop(self, k, *args): _k =", "2: args = 
(args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def copy(self):", "**kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs) def __getattr__(self, k):", "k def _check_reserved(self, k): if hasattr(self.__class__, k): raise ReservedNameError(k) elif", "= self._adapt_key(k) _v = super(Dict, self).pop(_k, *args) try: self._gvalidate() except:", "iteritems(self): for k, v in super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self,", "*args, **kwargs): if len(args) > 1: raise Exception(\"update expected at", "def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb", "sys.exc_info() try: super(Sequence, self).insert(idx, item) except Exception, e: print(\"das.types.Sequence.remove: Failed", "\"V\" + fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self, data):", "= super(Sequence, self).pop(*args) try: self._gvalidate() except: ec, ei, tb =", "self def __xor__(self, y): rv = self.copy() rv ^= y", "values(self): return [x for x in self.itervalues()] def iteritems(self): for", "(%s)\" % e) raise ec, ei, tb return self def", "k if hasattr(self, k2): #print(\"Forward '%s' to %s class '%s'\"", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i,", "super(Sequence, self).__delslice__(i, j) try: self._gvalidate() except: ec, ei, tb =", "self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on):", "try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals) except", "sys.exc_info() try: super(Dict, self).update(items) except Exception, e: print(\"das.types.Dict.clear: Failed to", "self._dict.pop(k, *args) 
try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "e) raise ec, ei, tb return self def __add__(self, y):", "# if _k i not defined but a default value", "y): oldvals = super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i,", "1 arguments, got %d\" % len(args)) oldvals = self._dict.copy() try:", "self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase, tuple):", "return if k2 in self.__dict__: if self.__dict__[k2] != getattr(self._dict, k):", "recover struct data (%s)\" % e) raise ec, ei, tb", "# Override of dict.setdefault def _setdefault(self, *args): nargs = len(args)", "super(Dict, self).pop(_k, *args) try: self._gvalidate() except: ec, ei, tb =", "super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception, e: print(\"das.types.Sequence.insert: Failed to", "self._dict) oldval = (self._dict[k] if wasset else None) self._dict.__setitem__(k, self._adapt_value(v,", "ei, tb = sys.exc_info() try: self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear:", "_copy(self): return self._wrap(self) # Override of dict.setdefault def _setdefault(self, *args):", "e: print(\"das.types.Struct.clear: Failed to recover struct data (%s)\" % e)", "sys.exc_info() try: super(Sequence, self).pop() except Exception, e: print(\"das.types.Sequence.append: Failed to", "for i, x in enumerate(y)])) try: self._gvalidate() except: ec, ei,", "len(self) + len(y))) def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i))", "override method of the same name prefixed by '_' in", "index=i) for i, x in enumerate(y)] for item in lst:", "self).add(ae) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "self._dict.copy() try: if len(args) == 1: a0 = args[0] if", "def _adapt_key(self, key): st = self._get_schema_type() return (key if st", "if 
i < 0: if n is None: n =", "ec, ei, tb = sys.exc_info() # Note: we can reach", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).pop() except Exception,", "*args, **kwargs) def _adapt_key(self, key): st = self._get_schema_type() return (key", "except Exception, e: print(\"das.types.Set.__ixor__: Failed to recover set data (%s)\"", "v\" + fullmsg else: fullmsg = \"V\" + fullmsg super(VersionError,", "if nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args)", "% name) class VersionError(Exception): def __init__(self, msg=None, current_version=None, required_version=None): fullmsg", "except: ec, ei, tb = sys.exc_info() try: ii = self._wrap_index(i,", "self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate() except:", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self),", "x in self.itervalues()] def iteritems(self): for k, v in super(Dict,", "except: ec, ei, tb = sys.exc_info() try: if args: super(Sequence,", "tb = sys.exc_info() try: self._dict[k] = oldval except Exception, e:", "self: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(a0[k], key=k)", "if isinstance(oth, Struct) else oth) def __ge__(self, oth): return self._dict.__ge__(oth._dict", "= self._adapt_value(v, key=k) for k, v in kwargs.iteritems(): k =", "# return False def index(self, y): return super(Sequence, self).index(self._adapt_value(y, index=0))", "self._adapt_value(y, index=i)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "fullmsg += \": no requirements\" if current_version: fullmsg += \",", "tuple is already created # Maybe because tuple is immutable?", "sys.exc_info() try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.pop: Failed", "name\" % name) class VersionError(Exception): def __init__(self, msg=None, current_version=None, required_version=None):", "= self._adapt_value(y, index=0) # return 
super(Sequence, self).__contains__(_v) # except: #", "else: del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__: Failed to recover dict", "raise Exception(\"update expected at most 1 arguments, got %d\" %", "class TypeBase(object): @classmethod def TransferGlobalValidator(klass, src, dst): if isinstance(src, klass)", "n = len(self) ii = i + n if ii", "i, j, y): oldvals = super(Sequence, self).__getslice__(i, j) newvals =", "difference(self, rhs): return self.__sub__(rhs) def union(self, rhs): return self.__or__(rhs) def", "% e) raise ec, ei, tb return self def __add__(self,", "k): raise ReservedNameError(k) elif hasattr(self._dict, k): k2 = \"_\" +", "is not None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st = self._get_schema_type()", "= [self._adapt_value(x, index=i+k) for k, x in enumerate(y)] super(Sequence, self).__setslice__(i,", "got %d\" % nargs) if nargs == 2: args =", "sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals)", "k, v): k = self._get_alias(k) self._check_reserved(k) wasset = (k in", "a0.keys(): k = self._adapt_key(k) if k in self: oldvals[k] =", "if st is not None: # run self validation first", "self).__init__(\"'%s' is a reserved name\" % name) class VersionError(Exception): def", "st = self._get_schema_type() rv = self.__class__(rhs if st is None", "e) raise ec, ei, tb return self def __sub__(self, y):", "x in enumerate(y)] for item in lst: if item in", "= self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) try: self._gvalidate()", "if isinstance(oth, Struct) else oth) def __lt__(self, oth): return self._dict.__lt__(oth._dict", "(%s)\" % e) raise ec, ei, tb def pop(self): item", "raise ReservedNameError(k) else: msg = \"[das] %s's '%s(...)' method conflicts", "wasset else None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except:", "ec, ei, tb return self def 
__add__(self, y): rv =", "super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), []) except Exception, e: print(\"das.types.Sequence.extend:", "k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k): _k =", "ei, tb return item def difference(self, rhs): return self.__sub__(rhs) def", "super(Sequence, self).pop() except Exception, e: print(\"das.types.Sequence.append: Failed to recover sequence", "if _k was a valid key super(Dict, self).__setitem__(_k, _v) except", "pop(self, *args): rv = super(Sequence, self).pop(*args) try: self._gvalidate() except: ec,", "raise ec, ei, tb return self def __sub__(self, y): rv", "return self.__or__(y) def __ixor__(self, y): oldvals = super(Set, self).copy() super(Set,", "rv = self[:] rv.__iadd__(y) return rv def __setitem__(self, i, y):", "self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals = super(Set, self).copy()", "self._wrap(self) def update(self, *args, **kwargs): oldvals = {} remvals =", "value is provided, we should not reach here # as", "it instead\" % (type(self).__name__, k, k, k) st = self._get_schema_type()", "k = self._adapt_key(k) wasset = (k in self) oldval =", "# _v = self._adapt_value(y, index=0) # return super(Sequence, self).__contains__(_v) #", "args: lst = [self._adapt_value(x, index=i) for i, x in enumerate(y)]", "except: ec, ei, tb = sys.exc_info() try: if wasset: self._dict[k]", "k)) return getattr(self._dict, k) else: #raise AttributeError(\"'Struct' has no attribute", "= self._get_schema_type() rv = self.__class__(rhs if st is None else", "dict.setdefault def _setdefault(self, *args): nargs = len(args) if nargs >", "# Special case for __class__ member that we may want", "def __rsub__(self, y): return self.__sub__(y) def __ior__(self, y): oldvals =", "self.__dict__[k2] != getattr(self._dict, k): raise ReservedNameError(k) else: msg = \"[das]", "def 
_set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"]", "msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k) def ordered_keys(self): return filter(lambda", "if st is not None and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k])", "1 arguments, got %d\" % len(args)) for k, v in", "v = self._dict.popitem() try: self._gvalidate() except: ec, ei, tb =", "print(\"das.types.Set.__ior__: Failed to recover set data (%s)\" % e) raise", "def __cmp__(self, oth): # base set class doesn't implement __cmp__", "tb def extend(self, y): newvals = [self._adapt_value(x, index=len(self)+i) for i,", "% (k, \"has\" if hasattr(self._dict, k) else \"hasn't\")) return self.__getattribute__(k)", "else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__: Failed to recover struct", "tb = sys.exc_info() try: super(Dict, self).__setitem__(item[0], item[1]) except Exception, e:", "ei, tb = sys.exc_info() try: if args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1,", "set class doesn't implement __cmp__ # but we need it", "(%s)\" % e) raise ec, ei, tb return _v def", "def clear(self): items = super(Dict, self).items() super(Dict, self).clear() try: self._gvalidate()", "None else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k, v): k =", "self._dict.clear() self._dict.update(oldvals) except Exception, e: print(\"das.types.Struct.update: Failed to recover struct", "oth): return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth) def __eq__(self,", "except Exception, e: print(\"das.types.Sequence.__imul__: Failed to recover sequence data (%s)\"", "ec, ei, tb def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k)))", "% e) raise ec, ei, tb def pop(self): item =", "= k k = self._get_alias(k) oldval = 
self._dict.get(k, None) retval", "self: return super(Set, self).add(ae) try: self._gvalidate() except: ec, ei, tb", "# except: # return False def index(self, y): return super(Sequence,", "wasset = (k in self._dict) oldval = (self._dict[k] if wasset", "ei.__class__, ei, tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type):", "= set() if len(args) == 1: a0 = args[0] if", "index=i)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "ec, ei, tb = sys.exc_info() try: # if _k i", "[self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)] super(Sequence, self).extend(newvals) try:", "oldval = (self._dict[k] if wasset else None) self._dict.__setitem__(k, self._adapt_value(v, key=k))", "if nargs >= 1: self._check_reserved(args[0]) if nargs == 2: args", "super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__: Failed to", "print(\"das.types.Sequence.insert: Failed to recover sequence data (%s)\" % e) raise", "k, k, k) st = self._get_schema_type() if st is not", "self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase, tuple): def __init__(self, *args): #", "ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(_k, _v) except Exception,", "return 1 def __iter__(self): for item in super(Set, self).__iter__(): yield", "self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self, rhs): st = self._get_schema_type() rv", "rhs): return self.__and__(rhs) def symmetric_difference(self, rhs): return self.__xor__(rhs) class Dict(TypeBase,", "Struct) else oth) def __gt__(self, oth): return self._dict.__gt__(oth._dict if isinstance(oth,", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self), [])", "k2 in self.__dict__: if self.__dict__[k2] != getattr(self._dict, k): raise ReservedNameError(k)", "is a reserved name\" % name) class VersionError(Exception): def __init__(self,", "tb def 
_get_alias(self, k): st = self._get_schema_type() if st is", "tb = sys.exc_info() try: if wasset: super(Dict, self).__setitem__(k, oldval) else:", "add(self, e): ae = self._adapt_value(e, index=len(self)) if ae in self:", "super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__: Failed to recover set", "= self._get_schema_type() if schema_type is not None: schema_type.validate(self) self._set_schema_type(schema_type) def", "def __cmp__(self, oth): return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth)", "if isinstance(oth, Struct) else oth) def __le__(self, oth): return self._dict.__le__(oth._dict", "def _copy(self): return self._wrap(self) # Override of dict.setdefault def _setdefault(self,", "ec, ei, tb = sys.exc_info() try: ii = self._wrap_index(i, clamp=True)", "len(self) super(Sequence, self).__imul__(n) try: self._gvalidate() except: ec, ei, tb =", "y): rv = self.copy() rv &= y return rv def", "getattr(self, \"_validate_globally\")() except: _, ei, tb = sys.exc_info() ei =", "ei, tb = sys.exc_info() try: super(Dict, self).update(items) except Exception, e:", "= self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) elif len(args)", "e) raise ec, ei, tb return self def __and__(self, y):", "nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args)", "def clear(self): oldvals = super(Set, self).copy() super(Set, self).clear() try: self._gvalidate()", "data (%s)\" % e) raise ec, ei, tb # Override", "_k = self._adapt_key(k) _v = super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try:", "= len(args) if nargs > 2: raise TypeError(\"setdefault expected at", "self._wrap(self) # Override of dict.setdefault def _setdefault(self, *args): nargs =", "fullmsg = msg + \" v\" + fullmsg else: fullmsg", "e: print(\"das.types.Set.__isub__: Failed to recover set data (%s)\" % e)", "try: super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), 
[]) except Exception, e:", "default value is provided, we should not reach here #", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(n,", "by '_' in current class k2 = '_' + k", "= sys.exc_info() try: self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear: Failed to", "return self._wrap(self) # Override of dict.setdefault def _setdefault(self, *args): nargs", "else: #raise AttributeError(\"'Struct' has no attribute '%s' (dict %s)\" %", "j) super(Sequence, self).__delslice__(i, j) try: self._gvalidate() except: ec, ei, tb", "j) newvals = [self._adapt_value(x, index=i+k) for k, x in enumerate(y)]", "raise ec, ei, tb def __contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def", "__iter__(self): for item in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def", "i not defined but a default value is provided, we", "print(\"das.types.Struct.popitem: Failed to recover struct data (%s)\" % e) raise", "TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self, i, n=None, clamp=False): if i", "return self.__getattribute__(k) def __setattr__(self, k, v): # Special case for", "self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k], key=k) else: for k, v", "self._get_schema_type() if schema_type is not None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self):", "return _v def popitem(self): item = super(Dict, self).popitem() try: self._gvalidate()", "rv -= y return rv def __rsub__(self, y): return self.__sub__(y)", "self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) for k, v in", "not None: n = das.get_schema_type_name(st) if n: msg = \"[%s]", "= schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"]", "else: remvals.add(k) 
self[k] = self._adapt_value(a0[k], key=k) else: for k, v", "index=index) def _validate(self, schema_type=None): if schema_type is None: schema_type =", "__rand__(self, y): return self.__and__(y) def __isub__(self, y): oldvals = super(Set,", "*args, **kwargs): oldvals = {} remvals = set() if len(args)", "e) raise ec, ei, tb def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self,", "super(Set, self).remove(item) except Exception, e: print(\"das.types.Set.update: Failed to recover set", "index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def _validate(self, schema_type=None): if", "except Exception, e: print(\"das.types.Struct.__delattr__: Failed to recover struct data (%s)\"", "self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return self.data def __exit__(self, type, value,", "oldvals: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k)", "e) raise ec, ei, tb # def __contains__(self, y): #", "self._adapt_key(k) wasset = (k in self) oldval = (self[k] if", "def __lt__(self, oth): return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth)", "self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def _items(self): return [x for", "else oth) def __le__(self, oth): return self._dict.__le__(oth._dict if isinstance(oth, Struct)", "except: ec, ei, tb = sys.exc_info() try: self._dict.update(items) except Exception,", "self.itervalues()] def _iteritems(self): for k, v in self._dict.iteritems(): yield k,", "i, x in enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate() except: ec,", "not k in oldvals: oldvals[k] = self[k] else: remvals.add(k) self[k]", "super(Dict, self).items() super(Dict, self).clear() try: self._gvalidate() except: ec, ei, tb", "def __or__(self, y): rv = self.copy() rv |= y return", "e: print(\"das.types.Set.__ior__: Failed to recover set data (%s)\" % e)", 
"self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try:", "else oth) def __ge__(self, oth): return self._dict.__ge__(oth._dict if isinstance(oth, Struct)", "_enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase, tuple): def __init__(self,", "= cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"]", "except: ec, ei, tb = sys.exc_info() # Note: we can", "data (%s)\" % e) raise ec, ei, tb def _get_alias(self,", "__init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data = data self.oldstate = None", "[x for x in self.iteritems()] class Struct(TypeBase): def __init__(self, *args,", "= self[:] rv.__iadd__(y) return rv def __setitem__(self, i, y): super(Sequence,", "None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st = self._get_schema_type() if st", "TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if hasattr(self._dict, k): # Look for", "self._dict.__iter__() def __len__(self): return self._dict.__len__() def __str__(self): return self._dict.__str__() def", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Dict, self).update(items)", "dict.__init__(self, *args, **kwargs) def _adapt_key(self, key): st = self._get_schema_type() return", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict.update(items) except", "ei, tb = sys.exc_info() try: if wasset: super(Dict, self).__setitem__(k, oldval)", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(idx,", "k, k) st = self._get_schema_type() if st is not None:", "super(Set, self).remove(ae) except Exception, e: print(\"das.types.Set.add: Failed to recover set", "because tuple is immutable? 
super(Tuple, self).__init__() def __add__(self, y): raise", "del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setattr__: Failed to recover struct data", "def pop(self, k, *args): _k = self._adapt_key(k) _v = super(Dict,", "\": %s required\" % required_version else: fullmsg += \": no", "reach # the core of the method, tuple is already", "in remvals: super(Dict, self).__delitem__(k) for k, v in oldvals.iteritems(): super(Dict,", "Validation Failed (%s)\" % str(ei)) raise ei.__class__, ei, tb def", "y return rv def __ror__(self, y): return self.__or__(y) def __ixor__(self,", "= oldval except Exception, e: print(\"das.types.Struct.pop: Failed to recover struct", "y return rv def __rsub__(self, y): return self.__sub__(y) def __ior__(self,", "== \"__class__\": super(Struct, self).__setattr__(k, v) else: k = self._get_alias(k) self._check_reserved(k)", "e: print(\"das.types.Struct.pop: Failed to recover struct data (%s)\" % e)", "# Look for an override method of the same name", "str(ei)) raise ei.__class__, ei, tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def", "args[0] if hasattr(a0, \"keys\"): for k in a0.keys(): k =", "args = (args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def copy(self): return", "__repr__(self): return self._dict.__repr__() # Override of dict.has_key def _has_key(self, k):", "ec, ei, tb class Set(TypeBase, set): def __init__(self, args): TypeBase.__init__(self)", "__setitem__(self, k, v): k = self._get_alias(k) self._check_reserved(k) wasset = (k", "def __sub__(self, y): rv = self.copy() rv -= y return", "return rv def _adapt_value(self, value, key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(),", "len(oth): return -1 else: return 1 def __iter__(self): for item", "st = self._get_schema_type() if st is not None and st.has_key(k):", "return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k, v): 
k = self._get_alias(k)", "# if isinstance(st[k], das.schematypes.Deprecated): # message = (\"[das] Field %s", "= len(args) if nargs > 2: raise TypeError(\"_setdefault expected at", "tb return self def __add__(self, y): rv = self[:] rv.__iadd__(y)", "sys.exc_info() try: if wasset: super(Dict, self).__setitem__(k, oldval) else: del(self[k]) except", "oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setattr__: Failed to recover", "ec, ei, tb def pop(self): item = super(Set, self).pop() try:", "= super(Dict, self).items() super(Dict, self).clear() try: self._gvalidate() except: ec, ei,", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(oldlen,", "__setslice__(self, i, j, y): oldvals = super(Sequence, self).__getslice__(i, j) newvals", "ec, ei, tb return self def __or__(self, y): rv =", "ec, ei, tb def __contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self,", "oldval = self._dict.get(k, None) retval = self._dict.pop(k, *args) try: self._gvalidate()", "_v = super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate() except: ec,", "with data field '%s', use '_%s(...)' to call it instead\"", "tb def update(self, *args): added = set() for y in", "nargs = len(args) if nargs > 2: raise TypeError(\"setdefault expected", "= self._adapt_key(k) if k in self: oldvals[k] = self[k] else:", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence,", "of dict.popitem def _popitem(self): k, v = self._dict.popitem() try: self._gvalidate()", "= self.copy() rv -= y return rv def __rsub__(self, y):", "key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def _validate(self, schema_type=None):", "# Funny, we need to declare *args here, but at", "__ixor__(self, y): oldvals = super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for", 
"super(Dict, self).popitem() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k, v): k =", "y return rv def __rxor__(self, y): rv = self.copy() rv", "e: print(\"das.types.Set.update: Failed to recover set data (%s)\" % e)", "else: return i def __imul__(self, n): oldlen = len(self) super(Sequence,", "super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))", "sys.exc_info() try: super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed", "%d\" % len(args)) for k, v in kwargs.iteritems(): k =", "item) except Exception, e: print(\"das.types.Sequence.remove: Failed to recover sequence data", "in enumerate(y)] for item in lst: if item in self:", "Note: we can reach here only if k was a", "% (repr(k), repr(aliasname))) # das.print_once(message) return aliasname return k def", "# def __contains__(self, k): # try: # _k = self._adapt_key(k)", "= self._adapt_value(e, index=len(self)) if ae in self: return super(Set, self).add(ae)", "on class Tuple(TypeBase, tuple): def __init__(self, *args): # Funny, we", "the time we reach # the core of the method,", "= self._dict.get(k, None) retval = self._dict.pop(k, *args) try: self._gvalidate() except:", "return self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self, i, j): oldvals =", "= sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self), []) except Exception, e:", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(idx, item)", "super(Tuple, self).__getitem__(i)) class Sequence(TypeBase, list): def __init__(self, *args): TypeBase.__init__(self) list.__init__(self,", "*args): _k = k k = self._get_alias(k) oldval = self._dict.get(k,", "ec, ei, tb return item def clear(self): items = super(Dict,", "time we reach # the core of the method, tuple", 
"symmetric_difference(self, rhs): return self.__xor__(rhs) class Dict(TypeBase, dict): def __init__(self, *args,", "tb = sys.exc_info() # Note: we can reach here only", "if hasattr(self, k2): # don't need to create forwarding attribute", "i, j): oldvals = super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j)", "class k2 = '_' + k if hasattr(self, k2): #print(\"Forward", "except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence data (%s)\"", "e) raise ec, ei, tb def append(self, y): n =", "if nargs > 2: raise TypeError(\"_setdefault expected at most 2", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: if", "'%s' to %s class '%s'\" % (k, self.__class__.__name__, k2)) return", "in self.itervalues()] def iteritems(self): for k, v in super(Dict, self).iteritems():", "ei, tb = sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except", "k if hasattr(self, k2): # don't need to create forwarding", "das.get_schema_type_name(st) if n: msg = \"[%s] %s\" % (n, msg)", "sys.exc_info() try: for k in remvals: super(Dict, self).__delitem__(k) for k,", "for y in args: lst = [self._adapt_value(x, index=i) for i,", "pass return self.data def __exit__(self, type, value, traceback): if self.oldstate", "value, traceback): if self.oldstate is not None: self.data._enable_global_validation(self.oldstate) self.oldstate =", "tb # Override of dict.copy def _copy(self): return self._wrap(self) #", "data (%s)\" % e) raise ec, ei, tb def copy(self):", "self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def values(self): return [x for x", "self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try:", "self).setdefault(*args) def copy(self): return self._wrap(self) def update(self, *args, **kwargs): oldvals", "return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth) def __iter__(self): return", "_pop(self, k, *args): 
_k = k k = self._get_alias(k) oldval", "ec, ei, tb return retval # Override of dict.popitem def", "= sys.exc_info() try: super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), []) except", "if k in self: oldvals[k] = self[k] else: remvals.add(k) self[k]", "+= \": no requirements\" if current_version: fullmsg += \", %s", "sequence data (%s)\" % e) raise ec, ei, tb def", "= [self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)] super(Sequence, self).extend(newvals)", "attribute '%s' (dict %s)\" % (k, \"has\" if hasattr(self._dict, k)", "ii, oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence", "1 def __iter__(self): for item in super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self,", "def items(self): return [x for x in self.iteritems()] class Struct(TypeBase):", "_v = self._adapt_value(y, index=0) # return super(Sequence, self).__contains__(_v) # except:", "self.copy() rv ^= y return rv def __rxor__(self, y): rv", "msg=None, current_version=None, required_version=None): fullmsg = \"ersion error\" if required_version: fullmsg", "# Skip global validaton return gvcb = self._get_validate_globally_cb() if gvcb", "def __iter__(self): for item in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item)", "*args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"]", "%d\" % nargs) if nargs == 2: args = (args[0],", "return item def difference(self, rhs): return self.__sub__(rhs) def union(self, rhs):", "st is not None and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if", "def __exit__(self, type, value, traceback): if self.oldstate is not None:", "k, v): k = self._adapt_key(k) wasset = (k in self)", "def _itervalues(self): for v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def", "x: x in self, 
self._get_schema_type().ordered_keys()) def _itervalues(self): for v in", "self).pop(_k, *args) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "_get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type def", "try: super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed to", "e) raise ec, ei, tb return retval # Override of", "required_version else: fullmsg += \": no requirements\" if current_version: fullmsg", "for item in lst: if item in self: continue super(Set,", "= \"[%s] %s\" % (n, msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict,", "raise ec, ei, tb def copy(self): return self._wrap(self) def add(self,", "insert(self, i, y): super(Sequence, self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate() except:", "oth) def __le__(self, oth): return self._dict.__le__(oth._dict if isinstance(oth, Struct) else", "ec, ei, tb def _get_alias(self, k): st = self._get_schema_type() if", "return filter(lambda x: x in self, self._get_schema_type().ordered_keys()) def _itervalues(self): for", "key=k) else: for k, v in a0: k = self._get_alias(k)", "ec, ei, tb def update(self, *args): added = set() for", "+ k if hasattr(self, k2): #print(\"Forward '%s' to %s class", "ei, tb = sys.exc_info() ei = das.ValidationError(\"Global Validation Failed (%s)\"", "def update(self, *args, **kwargs): oldvals = {} remvals = set()", "self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def _values(self): return [x for x", "ei, tb = sys.exc_info() try: super(Sequence, self).insert(ii, item) except Exception,", "% e) raise ec, ei, tb def __getslice__(self, i, j):", "try: getattr(self, \"_validate_globally\")() except: _, ei, tb = sys.exc_info() ei", "v) except Exception, e: print(\"das.types.Dict.update: Failed to recover dict data", "Exception, e: print(\"das.types.Set.__iand__: Failed to recover set data 
(%s)\" %", "struct data (%s)\" % e) raise ec, ei, tb #", "tb # Override of dict.clear def _clear(self): items = self._dict.items()", "oldval except Exception, e: print(\"das.types.Struct.__delattr__: Failed to recover struct data", "super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__: Failed to recover set", "self) oldval = (self[k] if wasset else None) super(Dict, self).__setitem__(k,", "sequence data (%s)\" % e) raise ec, ei, tb class", "e: print(\"das.types.Struct.__delattr__: Failed to recover struct data (%s)\" % e)", "self.oldstate is not None: self.data._enable_global_validation(self.oldstate) self.oldstate = None # Always", "= das.schematypes.Alias.Name(st[k]) if aliasname is not None: # if isinstance(st[k],", "super(Sequence, self).pop(*args) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "items = self._dict.items() self._dict.clear() try: self._gvalidate() except: ec, ei, tb", "&= y return rv def __rand__(self, y): return self.__and__(y) def", "\"_\" + k if hasattr(self, k2): # don't need to", "except: _, ei, tb = sys.exc_info() ei = das.ValidationError(\"Global Validation", "j, y): oldvals = super(Sequence, self).__getslice__(i, j) newvals = [self._adapt_value(x,", "def __init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] =", "return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def __delitem__(self, i): ii = self._wrap_index(i,", "len(y))) def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class Sequence(TypeBase,", "try: super(Dict, self).__setitem__(item[0], item[1]) except Exception, e: print(\"das.types.Dict.popitem: Failed to", "self).__init__() self.data = data self.oldstate = None def __enter__(self): try:", "n): rv = self[:] rv.__imul__(n) return rv def __rmul__(self, n):", "k = self._get_alias(k) self._check_reserved(k) 
self._dict[k] = self._adapt_value(a0[k], key=k) else: for", "try: if wasset: super(Dict, self).__setitem__(k, oldval) else: del(self[k]) except Exception,", "% e) raise ec, ei, tb def __delattr__(self, k): k", "Failed to recover struct data (%s)\" % e) raise ec,", "ec, ei, tb def __iter__(self): for item in super(Sequence, self).__iter__():", "in self._dict) oldval = (self._dict[k] if wasset else None) self._dict.__setitem__(k,", "ReservedNameError(k) elif hasattr(self._dict, k): k2 = \"_\" + k if", "k2): #print(\"Forward '%s' to %s class '%s'\" % (k, self.__class__.__name__,", "is None: n = len(self) ii = i + n", "update(self, *args): added = set() for y in args: lst", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(item[0],", "super(Sequence, self).extend(newvals) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "% str(ei)) raise ei.__class__, ei, tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"]", "class Sequence(TypeBase, list): def __init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args) def", "len(self) <= len(oth): return -1 else: return 1 def __iter__(self):", "sequence data (%s)\" % e) raise ec, ei, tb return", "else oth) def __gt__(self, oth): return self._dict.__gt__(oth._dict if isinstance(oth, Struct)", "ec, ei, tb = sys.exc_info() try: self._dict[k] = v except", "% e) raise ec, ei, tb return self def __or__(self,", "= self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return self.data def __exit__(self, type,", "y): super(Sequence, self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate() except: ec, ei,", "sys import das import traceback class ReservedNameError(Exception): def __init__(self, name):", "(%s)\" % e) raise ec, ei, tb def __getslice__(self, i,", "to recover set data (%s)\" % e) raise ec, ei,", "remvals.add(k) self[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei,", 
"wasset else None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec,", "= \"ersion error\" if required_version: fullmsg += \": %s required\"", "% len(args)) for k, v in kwargs.iteritems(): k = self._adapt_key(k)", "if st is None else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k,", "k in oldvals: oldvals[k] = self[k] else: remvals.add(k) self[k] =", "isinstance(src, klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def", "__sub__(self, y): rv = self.copy() rv -= y return rv", "data (%s)\" % e) raise ec, ei, tb def __iter__(self):", "clamp=True)) except Exception, e: print(\"das.types.Sequence.insert: Failed to recover sequence data", "_v def popitem(self): item = super(Dict, self).popitem() try: self._gvalidate() except:", "self).remove(item) except Exception, e: print(\"das.types.Set.update: Failed to recover set data", "self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) elif len(args) >", "sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__:", "raise ec, ei, tb return item def clear(self): items =", "a0: k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) for", "rv = self.copy() rv &= y return rv def __rand__(self,", "k, TypeBase.TransferGlobalValidator(self, v) def _items(self): return [x for x in", "index=n+i) for i, x in enumerate(y)]) try: self._gvalidate() except: ec,", "def _wrap_index(self, i, n=None, clamp=False): if i < 0: if", "tb def pop(self): item = super(Set, self).pop() try: self._gvalidate() except:", "_clear(self): items = self._dict.items() self._dict.clear() try: self._gvalidate() except: ec, ei,", "for item in super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def clear(self):", "sys.exc_info() # Note: we can reach here only if k", "unchanged # -> no need to check if 
_k was", "arguments, got %d\" % nargs) if nargs >= 1: self._check_reserved(args[0])", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict.clear() self._dict.update(oldvals)", "= (\"[das] Field %s is deprecated, use %s instead\" %", "if item in self: continue super(Set, self).add(item) added.add(item) try: self._gvalidate()", "self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i, j, y): oldvals", "_adapt_value(self, value, key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def", "continue super(Set, self).add(item) added.add(item) try: self._gvalidate() except: ec, ei, tb", "fullmsg += \", no version info\" if msg: fullmsg =", "e) raise ec, ei, tb def itervalues(self): for v in", "\", %s in use\" % current_version else: fullmsg += \",", "super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self,", "= self._get_schema_type() if st is not None: n = das.get_schema_type_name(st)", "the same name prefixed by '_' in current class k2", "try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delitem__: Failed to", "_validate(self, schema_type=None): if schema_type is None: schema_type = self._get_schema_type() if", "self.__xor__(rhs) class Dict(TypeBase, dict): def __init__(self, *args, **kwargs): TypeBase.__init__(self) dict.__init__(self,", "1: raise Exception(\"update expected at most 1 arguments, got %d\"", "enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals) try: self._gvalidate() except: ec, ei,", "(repr(k), repr(aliasname))) # das.print_once(message) return aliasname return k def _check_reserved(self,", "for item in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self,", "i, n=None, clamp=False): if i < 0: if n is", "tb = sys.exc_info() try: super(Dict, 
self).update(items) except Exception, e: print(\"das.types.Dict.clear:", "provided, we should not reach here # as dict is", "%s\" % (n, msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k) def", "recover dict data (%s)\" % e) raise ec, ei, tb", "except Exception, e: print(\"das.types.Set.update: Failed to recover set data (%s)\"", "v in kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v,", "if not self._is_global_validation_enabled(): # Skip global validaton return gvcb =", "return retval # Override of dict.popitem def _popitem(self): k, v", "raise IndexError(\"list index out of range\") else: return ii else:", "for v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def _values(self): return", "'%s' to dict class '%s'\" % (k, k)) return getattr(self._dict,", "- len(newvals), len(self), []) except Exception, e: print(\"das.types.Sequence.extend: Failed to", "(otherwise __delitem__(k) would fail) try: self._dict[k] = oldval except Exception,", "def TransferGlobalValidator(klass, src, dst): if isinstance(src, klass) and isinstance(dst, klass):", "def __init__(self, msg=None, current_version=None, required_version=None): fullmsg = \"ersion error\" if", "len(self), []) except Exception, e: print(\"das.types.Sequence.__imul__: Failed to recover sequence", "klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def ValidateGlobally(klass,", "key (otherwise __delitem__(k) would fail) try: self._dict[k] = oldval except", "+ n if ii < 0: if clamp: return 0", "i, x in enumerate(y)] for item in lst: if item", "Override of dict.update def _update(self, *args, **kwargs): if len(args) >", "tb = sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception,", "try: super(Sequence, self).pop() except Exception, e: print(\"das.types.Sequence.append: Failed to recover", "return super(Dict, 
self).__contains__(_k) # except: # return False def setdefault(self,", "_values(self): return [x for x in self.itervalues()] def _iteritems(self): for", "self._set_schema_type(schema_type) def _gvalidate(self): st = self._get_schema_type() if st is not", "st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if aliasname is not None: #", "*args): TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self, i, n=None, clamp=False): if", "e) raise ec, ei, tb def __getitem__(self, k): k =", "def __ror__(self, y): return self.__or__(y) def __ixor__(self, y): oldvals =", "\"_validate_globally\")() except: _, ei, tb = sys.exc_info() ei = das.ValidationError(\"Global", "index=i)) self._gvalidate() def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def", "self).clear() try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "Exception, e: print(\"das.types.Sequence.insert: Failed to recover sequence data (%s)\" %", "oth): return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth) def __le__(self,", "to create forwarding attribute (set __getattr__) return if k2 in", "k, v in a0: k = self._get_alias(k) self._check_reserved(k) self._dict[k] =", "ei, tb = sys.exc_info() try: if wasset: self._dict[k] = oldval", "v): k = self._get_alias(k) self._check_reserved(k) wasset = (k in self._dict)", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: for", "__init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is a reserved name\" % name)", "self._adapt_value(v, key=k) for k, v in kwargs.iteritems(): k = self._get_alias(k)", "data (%s)\" % e) raise ec, ei, tb def append(self,", "% e) raise ec, ei, tb def __delitem__(self, k): _k", "+ fullmsg else: fullmsg = \"V\" + fullmsg super(VersionError, self).__init__(fullmsg)", "conflicts with data field '%s', use '_%s(...)' to call it", "append(self, y): n = len(self) super(Sequence, self).append(self._adapt_value(y, 
index=n)) try: self._gvalidate()", "e) raise ec, ei, tb def __contains__(self, k): return self._dict.__contains__(self._get_alias(k))", "try: # _v = self._adapt_value(y, index=0) # return super(Sequence, self).__contains__(_v)", "(%s)\" % e) raise ec, ei, tb def update(self, *args):", "== 0: return 0 elif len(self) <= len(oth): return -1", "% e) raise ec, ei, tb def __getitem__(self, k): return", "% e) raise ec, ei, tb def __contains__(self, k): return", "key=k) self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict.clear()", "rv def remove(self, y): idx = self.index(y) item = self[idx]", "ei, tb def pop(self, k, *args): _k = self._adapt_key(k) _v", "k2 = \"_\" + k if hasattr(self, k2): # don't", "hasattr(self._dict, k): k2 = \"_\" + k if hasattr(self, k2):", "= sys.exc_info() # Note: we can reach here only if", "k, v in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def _items(self):", "implement __cmp__ # but we need it for some other", "return self def __sub__(self, y): rv = self.copy() rv -=", "tb = sys.exc_info() try: self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear: Failed", "print(\"das.types.Struct.update: Failed to recover struct data (%s)\" % e) raise", "kwargs.iteritems(): k = self._adapt_key(k) if k in self: if not", "= \"V\" + fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self,", "self.__mul__(n) def __iadd__(self, y): n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i)", "0 elif len(self) <= len(oth): return -1 else: return 1", "Exception, e: print(\"das.types.Struct.__setitem__: Failed to recover struct data (%s)\" %", "got %d\" % len(args)) oldvals = self._dict.copy() try: if len(args)", "k = self._adapt_key(k) if k in self: if not k", "here # as dict is actually unchanged # -> no", "= '_' + k if hasattr(self, k2): #print(\"Forward '%s' to", "if msg: fullmsg = msg + \" v\" + fullmsg", "oth) def 
__iter__(self): return self._dict.__iter__() def __len__(self): return self._dict.__len__() def", "else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self, value, key=None, index=None):", "Dict(TypeBase, dict): def __init__(self, *args, **kwargs): TypeBase.__init__(self) dict.__init__(self, *args, **kwargs)", "nargs > 2: raise TypeError(\"setdefault expected at most 2 arguments,", "can reach here only if k was a valid key", "self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate() except:", "__init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs) def", "if current_version: fullmsg += \", %s in use\" % current_version", "ii = i + n if ii < 0: if", "v): # Special case for __class__ member that we may", "Struct) else oth) def __ge__(self, oth): return self._dict.__ge__(oth._dict if isinstance(oth,", "__str__(self): return self._dict.__str__() def __repr__(self): return self._dict.__repr__() # Override of", "super(Sequence, self).__getitem__(i)) def __delitem__(self, i): ii = self._wrap_index(i, clamp=False) item", "oth): return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth) def __lt__(self,", "y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def __getitem__(self, i): return", "self.__sub__(y) def __ior__(self, y): oldvals = super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x,", "self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__: Failed to recover set data", "__xor__(self, y): rv = self.copy() rv ^= y return rv", "self._dict.__str__() def __repr__(self): return self._dict.__repr__() # Override of dict.has_key def", "index out of range\") else: return ii else: return i", "sys.exc_info() try: super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear: Failed to", "fullmsg else: fullmsg = 
\"V\" + fullmsg super(VersionError, self).__init__(fullmsg) class", "gvcb = self._get_validate_globally_cb() if gvcb is not None: gvcb() if", "__rmul__(self, n): return self.__mul__(n) def __iadd__(self, y): n = len(self)", "tb = sys.exc_info() try: super(Sequence, self).pop() except Exception, e: print(\"das.types.Sequence.append:", "remvals: super(Dict, self).__delitem__(k) for k, v in oldvals.iteritems(): super(Dict, self).__setitem__(k,", "if isinstance(oth, Struct) else oth) def __iter__(self): return self._dict.__iter__() def", "if hasattr(a0, \"keys\"): for k in a0.keys(): k = self._adapt_key(k)", "that we may want to modify for # to enable", "key=k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "AttributeError(\"'Struct' has no attribute '%s' (dict %s)\" % (k, \"has\"", "__delitem__(self, k): _k = self._adapt_key(k) _v = super(Dict, self).__getitem__(_k) super(Dict,", "= super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in", "None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self, rhs):", "None: self.data._enable_global_validation(self.oldstate) self.oldstate = None # Always re-raise exception return", "self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try:", "= (k in self._dict) oldval = (self._dict[k] if wasset else", "def __repr__(self): return self._dict.__repr__() # Override of dict.has_key def _has_key(self,", "*args): nargs = len(args) if nargs > 2: raise TypeError(\"_setdefault", "__cmp__(self, oth): return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth) def", "key=k) for k, v in kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k)", "newvals = [self._adapt_value(x, index=i+k) for k, x in enumerate(y)] super(Sequence,", "if st is not None: n = das.get_schema_type_name(st) if n:", "self._adapt_value(y, index=i)) self._gvalidate() def 
__getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i))", "v in super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def items(self):", "Sequence(TypeBase, list): def __init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self,", "print(\"das.types.Dict.popitem: Failed to recover dict data (%s)\" % e) raise", "super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)]) try: self._gvalidate()", "self def __and__(self, y): rv = self.copy() rv &= y", "super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate()", "y): return super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self, i, y): super(Sequence,", "ei, tb def update(self, *args): added = set() for y", "self: if not k in oldvals: oldvals[k] = self[k] else:", "data (%s)\" % e) raise ec, ei, tb def pop(self,", "return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth) def __lt__(self, oth):", "= super(Dict, self).pop(_k, *args) try: self._gvalidate() except: ec, ei, tb", "for x in self.iteritems()] class Struct(TypeBase): def __init__(self, *args, **kwargs):", "Exception, e: print(\"das.types.Set.clear: Failed to recover set data (%s)\" %", "self.__and__(y) def __isub__(self, y): oldvals = super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x,", "declare *args here, but at the time we reach #", "Exception, e: print(\"das.types.Struct.__delattr__: Failed to recover struct data (%s)\" %", "first (container validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled():", "sys.exc_info() try: self._dict.update(items) except Exception, e: print(\"das.types.Struct.clear: Failed to recover", "if n: msg = \"[%s] %s\" % (n, msg) das.print_once(msg)", "self._get_schema_type() rv = self.__class__(rhs if st is 
None else st._validate_self(rhs))", "rhs): return self.__or__(rhs) def intersection(self, rhs): return self.__and__(rhs) def symmetric_difference(self,", "# message = (\"[das] Field %s is deprecated, use %s", "dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def ValidateGlobally(klass, inst): if isinstance(inst, klass):", "= super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in", "member that we may want to modify for # to", "self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei, tb =", "Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence data (%s)\" %", "else: #print(\"Forward '%s' to dict class '%s'\" % (k, k))", "data): super(GlobalValidationDisabled, self).__init__() self.data = data self.oldstate = None def", "tuple): def __init__(self, *args): # Funny, we need to declare", "raise ec, ei, tb def __getitem__(self, k): k = self._get_alias(k)", "yield TypeBase.TransferGlobalValidator(self, v) def _values(self): return [x for x in", "e) raise ec, ei, tb return self def __or__(self, y):", "tb = sys.exc_info() try: for item in added: super(Set, self).remove(item)", "is not None: # if isinstance(st[k], das.schematypes.Deprecated): # message =", "lst: if item in self: continue super(Set, self).add(item) added.add(item) try:", "= sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii,", "it for some other purpose if len(self.symmetric_difference(oth)) == 0: return", "tb return self def __and__(self, y): rv = self.copy() rv", "self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delattr__: Failed to recover", "items = super(Dict, self).items() super(Dict, self).clear() try: self._gvalidate() except: ec,", "yield k, TypeBase.TransferGlobalValidator(self, v) def items(self): return [x for x", "i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class 
Sequence(TypeBase, list): def __init__(self,", "e: print(\"das.types.Struct.popitem: Failed to recover struct data (%s)\" % e)", "try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__: Failed", "need it for some other purpose if len(self.symmetric_difference(oth)) == 0:", "self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear: Failed to recover set data", "added.add(item) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "e) raise ec, ei, tb return item def difference(self, rhs):", "is provided, we should not reach here # as dict", "_k = self._adapt_key(k) # return super(Dict, self).__contains__(_k) # except: #", "= sys.exc_info() try: super(Set, self).add(item) except Exception, e: print(\"das.types.Set.pop: Failed", "union(self, rhs): return self.__or__(rhs) def intersection(self, rhs): return self.__and__(rhs) def", "__setitem__(self, i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def __getitem__(self,", "reach here # as dict is actually unchanged # ->", "print(\"das.types.Struct.__setitem__: Failed to recover struct data (%s)\" % e) raise", "oth) def __gt__(self, oth): return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else", "except Exception, e: print(\"das.types.Struct.__delitem__: Failed to recover struct data (%s)\"", "aliasname = das.schematypes.Alias.Name(st[k]) if aliasname is not None: # if", "\"keys\"): for k in a0.keys(): k = self._adapt_key(k) if k", "Skip global validaton return gvcb = self._get_validate_globally_cb() if gvcb is", "if ae in self: return super(Set, self).add(ae) try: self._gvalidate() except:", "name): super(ReservedNameError, self).__init__(\"'%s' is a reserved name\" % name) class", "oth) def __ge__(self, oth): return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else", "k, x in enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals) try: self._gvalidate()", "= 
sys.exc_info() try: super(Sequence, self).insert(ii, item) except Exception, e: print(\"das.types.Sequence.__delitem__:", "= self._wrap_index(i, clamp=False) item = super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try:", "def __setitem__(self, k, v): k = self._adapt_key(k) wasset = (k", "except Exception, e: print(\"das.types.Set.add: Failed to recover set data (%s)\"", "is not None: # run self validation first (container validation)", "super(Sequence, self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except: ec, ei, tb =", "self._dict[k]) except KeyError: if hasattr(self._dict, k): # Look for an", "dict): def __init__(self, *args, **kwargs): TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def", "Exception, e: print(\"das.types.Sequence.__delitem__: Failed to recover sequence data (%s)\" %", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: ii =", "None # Always re-raise exception return False class TypeBase(object): @classmethod", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Set, self).remove(ae)", "% e) raise ec, ei, tb return self def __mul__(self,", "j, newvals) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "'_' in current class k2 = '_' + k if", "def union(self, rhs): return self.__or__(rhs) def intersection(self, rhs): return self.__and__(rhs)", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(ii, item)", "if _k i not defined but a default value is", "k) def ordered_keys(self): return filter(lambda x: x in self, self._get_schema_type().ordered_keys())", "(%s)\" % e) raise ec, ei, tb def __iter__(self): for", "super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def items(self): return [x", "oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except Exception, e: print(\"das.types.Dict.update: Failed to", "len(args) == 1: a0 = args[0] if hasattr(a0, \"keys\"): for", "= self[k] else: remvals.add(k) self[k] 
= self._adapt_value(a0[k], key=k) else: for", "except Exception, e: print(\"das.types.Sequence.__iadd__: Failed to recover sequence data (%s)\"", "Exception, e: print(\"das.types.Dict.__setitem__: Failed to recover dict data (%s)\" %", "= sys.exc_info() try: super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem:", "oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) try:", "item = super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate() except: ec,", "data (%s)\" % e) raise ec, ei, tb return item", "else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k, v): k = self._adapt_key(k)", "in self.iteritems()] class Struct(TypeBase): def __init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"]", "k k = self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k) try:", "*args) def _wrap_index(self, i, n=None, clamp=False): if i < 0:", "we may want to modify for # to enable dynamic", "None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self, value, key=None,", "raise ec, ei, tb def pop(self, k, *args): _k =", "case for __class__ member that we may want to modify", "%s in use\" % current_version else: fullmsg += \", no", "newvals) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def __delitem__(self, i):", "das import traceback class ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s'", "2 arguments, got %d\" % nargs) if nargs == 2:", "len(self) ii = i + n if ii < 0:", "super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i, j, y):", "e) raise ec, ei, tb # Override of dict.clear def", "v) else: k = self._get_alias(k) 
self._check_reserved(k) wasset = (k in", "clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(len(self) -", "Exception, e: print(\"das.types.Sequence.extend: Failed to recover sequence data (%s)\" %", "return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if hasattr(self._dict, k): # Look", "except Exception, e: print(\"das.types.Set.__isub__: Failed to recover set data (%s)\"", "except Exception, e: print(\"das.types.Set.__ior__: Failed to recover set data (%s)\"", "may want to modify for # to enable dynamic function", "lst = [self._adapt_value(x, index=i) for i, x in enumerate(y)] for", "= \"_\" + k if hasattr(self, k2): # don't need", "ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(len(self) - len(newvals), len(self),", "e) raise ec, ei, tb return _v def popitem(self): item", "Override of dict.setdefault def _setdefault(self, *args): nargs = len(args) if", "enable dynamic function set binding if k == \"__class__\": super(Struct,", "self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__: Failed to recover set data", "None: # run self validation first (container validation) st._validate_self(self) if", "self._dict.__delitem__(k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() #", "= self.index(y) item = self[idx] super(Sequence, self).remove(item) try: self._gvalidate() except:", "src, dst): if isinstance(src, klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return", "self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)]) try: self._gvalidate() except:", "|= y return rv def __ror__(self, y): return self.__or__(y) def", "is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self, value,", "__init__(self, *args, **kwargs): 
TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def _adapt_key(self, key):", "def copy(self): return self._wrap(self) def update(self, *args, **kwargs): oldvals =", "we reach # the core of the method, tuple is", "__getitem__(self, k): k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self,", "% e) raise ec, ei, tb # Override of dict.copy", "Exception, e: print(\"das.types.Struct.__delitem__: Failed to recover struct data (%s)\" %", "return rv def __rmul__(self, n): return self.__mul__(n) def __iadd__(self, y):", "args): TypeBase.__init__(self) set.__init__(self, args) def __iand__(self, y): oldvals = super(Set,", "y): oldvals = super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i,", "def __isub__(self, y): oldvals = super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i)", "tb = sys.exc_info() try: super(Set, self).add(item) except Exception, e: print(\"das.types.Set.pop:", "ec, ei, tb # Override of dict.clear def _clear(self): items", "try: if len(args) == 1: a0 = args[0] if hasattr(a0,", "super(Sequence, self).__setslice__(ii, ii, oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to", "validation first (container validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if not", "ei, tb return rv def remove(self, y): idx = self.index(y)", "e: print(\"das.types.Set.__iand__: Failed to recover set data (%s)\" % e)", "= das.get_schema_type_name(st) if n: msg = \"[%s] %s\" % (n,", "try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False) except: pass return self.data def", "n if ii < 0: if clamp: return 0 else:", "k): k = self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k) try:", "return super(Sequence, self).__contains__(_v) # except: # return False def index(self,", "e) raise ec, ei, tb def 
__delattr__(self, k): k =", "self._get_alias(k) oldval = self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate() except: ec,", "> 2: raise TypeError(\"_setdefault expected at most 2 arguments, got", "sys.exc_info() ei = das.ValidationError(\"Global Validation Failed (%s)\" % str(ei)) raise", "tb = sys.exc_info() try: if args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False),", "ec, ei, tb return _v def popitem(self): item = super(Dict,", "forwarding attribute (set __getattr__) return if k2 in self.__dict__: if", "self[:] rv.__iadd__(y) return rv def __setitem__(self, i, y): super(Sequence, self).__setitem__(i,", "ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(item[0], item[1]) except Exception,", "'%s'\" % (k, k)) return getattr(self._dict, k) else: #raise AttributeError(\"'Struct'", "= self.copy() rv ^= y return rv def __cmp__(self, oth):", "def __delitem__(self, k): _k = k k = self._get_alias(k) oldval", "we should not reach here # as dict is actually", "sequence data (%s)\" % e) raise ec, ei, tb #", "item) def __setslice__(self, i, j, y): oldvals = super(Sequence, self).__getslice__(i,", "copy(self): return self._wrap(self) def update(self, *args, **kwargs): oldvals = {}", "getattr(self._dict, k): raise ReservedNameError(k) else: msg = \"[das] %s's '%s(...)'", "in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i, j,", "in use\" % current_version else: fullmsg += \", no version", "tb return item def clear(self): items = super(Dict, self).items() super(Dict,", "is None: schema_type = self._get_schema_type() if schema_type is not None:", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).pop() except", "remvals = set() if len(args) == 1: a0 = args[0]", "def _popitem(self): k, v = self._dict.popitem() try: self._gvalidate() except: ec,", "raise ec, ei, tb # Override of dict.clear def _clear(self):", "most 1 arguments, got %d\" % 
len(args)) oldvals = self._dict.copy()", "return getattr(self._dict, k) else: #raise AttributeError(\"'Struct' has no attribute '%s'", "v in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def _items(self): return", "oldval) else: del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__: Failed to recover", "(%s)\" % e) raise ec, ei, tb return item def", "self._gvalidate() except: ec, ei, tb = sys.exc_info() # Note: we", "update(self, *args, **kwargs): oldvals = {} remvals = set() if", "= self.copy() rv ^= y return rv def __rxor__(self, y):", "self validation first (container validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if", "k, v in a0: k = self._adapt_key(k) if k in", "= self._dict.items() self._dict.clear() try: self._gvalidate() except: ec, ei, tb =", "clamp=False), rv) else: super(Sequence, self).append(rv) except Exception, e: print(\"das.types.Sequence.pop: Failed", "a reserved name\" % name) class VersionError(Exception): def __init__(self, msg=None,", "_adapt_key(self, key): st = self._get_schema_type() return (key if st is", "return aliasname return k def _check_reserved(self, k): if hasattr(self.__class__, k):", "[]) except Exception, e: print(\"das.types.Sequence.__imul__: Failed to recover sequence data", "@classmethod def TransferGlobalValidator(klass, src, dst): if isinstance(src, klass) and isinstance(dst,", "= self._adapt_value(v, key=k) elif len(args) > 1: raise Exception(\"update expected", "(%s)\" % e) raise ec, ei, tb def copy(self): return", "**kwargs): TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def _adapt_key(self, key): st =", "else: raise IndexError(\"list index out of range\") else: return ii", "try: self._dict[k] = v except Exception, e: print(\"das.types.Struct.popitem: Failed to", "raise TypeError(\"_setdefault expected at most 2 arguments, got %d\" %", "self._dict.get(k, None) retval = self._dict.pop(k, *args) try: self._gvalidate() 
except: ec,", "self[idx] super(Sequence, self).remove(item) try: self._gvalidate() except: ec, ei, tb =", "y return rv def __rand__(self, y): return self.__and__(y) def __isub__(self,", "e) raise ec, ei, tb def __delitem__(self, k): _k =", "current_version else: fullmsg += \", no version info\" if msg:", "def __le__(self, oth): return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth)", "%d\" % (len(self), len(self) + len(y))) def __getitem__(self, i): return", "popitem(self): item = super(Dict, self).popitem() try: self._gvalidate() except: ec, ei,", "% e) raise ec, ei, tb # def __contains__(self, k):", "print(\"das.types.Sequence.__setslice__: Failed to recover sequence data (%s)\" % e) raise", "nargs = len(args) if nargs > 2: raise TypeError(\"_setdefault expected", "def _wrap(self, rhs): st = self._get_schema_type() rv = self.__class__(rhs if", "if nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0])) super(Dict,", "\" v\" + fullmsg else: fullmsg = \"V\" + fullmsg", "except Exception, e: print(\"das.types.Sequence.pop: Failed to recover sequence data (%s)\"", "else: for k, v in a0: k = self._get_alias(k) self._check_reserved(k)", "tb = sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self), []) except Exception,", "return self._dict.__repr__() # Override of dict.has_key def _has_key(self, k): return", "rv def __ror__(self, y): return self.__or__(y) def __ixor__(self, y): oldvals", "except Exception, e: print(\"das.types.Sequence.append: Failed to recover sequence data (%s)\"", "print(\"das.types.Struct.pop: Failed to recover struct data (%s)\" % e) raise", "retval # Override of dict.popitem def _popitem(self): k, v =", "GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data = data self.oldstate", "self.data._enable_global_validation(False) except: pass return self.data def __exit__(self, type, value, traceback):", "= sys.exc_info() try: for k in 
remvals: super(Dict, self).__delitem__(k) for", "item in lst: if item in self: continue super(Set, self).add(item)", "Exception, e: print(\"das.types.Dict.update: Failed to recover dict data (%s)\" %", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: #", "_get_alias(self, k): st = self._get_schema_type() if st is not None", "= self._adapt_value(a0[k], key=k) else: for k, v in a0: k", "__eq__(self, oth): return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth) def", "TransferGlobalValidator(klass, src, dst): if isinstance(src, klass) and isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate)", "item in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i,", "dict data (%s)\" % e) raise ec, ei, tb #", "if schema_type is not None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st", "key=key, index=index) def _validate(self, schema_type=None): if schema_type is None: schema_type", "not defined but a default value is provided, we should", "dict class '%s'\" % (k, k)) return getattr(self._dict, k) else:", "global validaton return gvcb = self._get_validate_globally_cb() if gvcb is not", "x in self.itervalues()] def _iteritems(self): for k, v in self._dict.iteritems():", "else: fullmsg += \", no version info\" if msg: fullmsg", "_set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def", "(%s)\" % e) raise ec, ei, tb def __delitem__(self, k):", "ec, ei, tb return self def __mul__(self, n): rv =", "return self def __and__(self, y): rv = self.copy() rv &=", "def _gvalidate(self): st = self._get_schema_type() if st is not None:", "super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__: Failed to recover set", "print(\"das.types.Set.add: Failed to recover set data (%s)\" % e) raise", 
"st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self, value, key=None, index=None): return", "core of the method, tuple is already created # Maybe", "% e) raise ec, ei, tb def pop(self, k, *args):", "= msg + \" v\" + fullmsg else: fullmsg =", "self).__getitem__(i)) class Sequence(TypeBase, list): def __init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args)", "(%s)\" % e) raise ec, ei, tb def _get_alias(self, k):", "#raise AttributeError(\"'Struct' has no attribute '%s' (dict %s)\" % (k,", "self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j) try: self._gvalidate() except: ec, ei,", "ei, tb return self def __mul__(self, n): rv = self[:]", "(k in self._dict) oldval = (self._dict[k] if wasset else None)", "at the time we reach # the core of the", "= self.copy() rv &= y return rv def __rand__(self, y):", "ReservedNameError(k) else: msg = \"[das] %s's '%s(...)' method conflicts with", "self.oldstate = None # Always re-raise exception return False class", "data field '%s', use '_%s(...)' to call it instead\" %", "in a0.keys(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k], key=k)", "k = self._adapt_key(k) if k in self: oldvals[k] = self[k]", "ei, tb = sys.exc_info() try: super(Set, self).__ior__(oldvals) except Exception, e:", "tuple of size %d, got %d\" % (len(self), len(self) +", "recover sequence data (%s)\" % e) raise ec, ei, tb", "item in added: super(Set, self).remove(item) except Exception, e: print(\"das.types.Set.update: Failed", "k2): # don't need to create forwarding attribute (set __getattr__)", "@classmethod def ValidateGlobally(klass, inst): if isinstance(inst, klass): inst._gvalidate() return inst", "intersection(self, rhs): return self.__and__(rhs) def symmetric_difference(self, rhs): return self.__xor__(rhs) class", "(key if st is None else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self,", "_has_key(self, k): return 
self._dict.has_key(self._get_alias(k)) # Override of dict.pop def _pop(self,", "e: print(\"das.types.Struct.__delitem__: Failed to recover struct data (%s)\" % e)", "[]) except Exception, e: print(\"das.types.Sequence.extend: Failed to recover sequence data", "isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def ValidateGlobally(klass, inst): if", "valid key (otherwise __delitem__(k) would fail) try: self._dict[k] = oldval", "def __add__(self, y): rv = self[:] rv.__iadd__(y) return rv def", "for item in added: super(Set, self).remove(item) except Exception, e: print(\"das.types.Set.update:", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Set, self).clear()", "ei, tb = sys.exc_info() # Note: we can reach here", "tb return self def __sub__(self, y): rv = self.copy() rv", "das.print_once(message) return aliasname return k def _check_reserved(self, k): if hasattr(self.__class__,", "y): oldvals = super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i,", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Set, self).__ior__(oldvals)", "k2) else: #print(\"Forward '%s' to dict class '%s'\" % (k,", "to %s class '%s'\" % (k, self.__class__.__name__, k2)) return getattr(self,", "= self._get_schema_type() return (key if st is None else das.adapt_value(key,", "in a0: k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k)", "traceback class ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is a", "return self.__sub__(y) def __ior__(self, y): oldvals = super(Set, self).copy() super(Set,", "*args) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "use %s instead\" % (repr(k), repr(aliasname))) # das.print_once(message) return aliasname", "return getattr(self, k2) else: #print(\"Forward '%s' to dict class '%s'\"", "ei, tb class Set(TypeBase, set): def __init__(self, 
args): TypeBase.__init__(self) set.__init__(self,", "= {} self._update(*args, **kwargs) def __getattr__(self, k): try: k =", "yield TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals = super(Set, self).copy() super(Set,", "Struct) else oth) def __lt__(self, oth): return self._dict.__lt__(oth._dict if isinstance(oth,", "Look for an override method of the same name prefixed", "i): ii = self._wrap_index(i, clamp=False) item = super(Sequence, self).__getitem__(ii) super(Sequence,", "n=len(self)-1, clamp=True)) except Exception, e: print(\"das.types.Sequence.insert: Failed to recover sequence", "def __iand__(self, y): oldvals = super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i)", "v in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def _values(self): return [x", "= super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in", "das.schematypes.Alias.Name(st[k]) if aliasname is not None: # if isinstance(st[k], das.schematypes.Deprecated):", "e: print(\"das.types.Dict.clear: Failed to recover dict data (%s)\" % e)", "ec, ei, tb return self def __xor__(self, y): rv =", "rv def __rmul__(self, n): return self.__mul__(n) def __iadd__(self, y): n", "% e) raise ec, ei, tb def append(self, y): n", "print(\"das.types.Sequence.remove: Failed to recover sequence data (%s)\" % e) raise", "ec, ei, tb return self def __and__(self, y): rv =", "tb return _v def popitem(self): item = super(Dict, self).popitem() try:", "self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth) def __gt__(self, oth): return", "None: schema_type = self._get_schema_type() if schema_type is not None: schema_type.validate(self)", "self).__setslice__(n, len(self), []) except Exception, e: print(\"das.types.Sequence.__iadd__: Failed to recover", "(%s)\" % e) raise ec, ei, tb class Set(TypeBase, set):", "in a0: k = self._adapt_key(k) if k in self: oldvals[k]", "k, v): # Special case for 
__class__ member that we", "try: super(Set, self).remove(ae) except Exception, e: print(\"das.types.Set.add: Failed to recover", "(%s)\" % str(ei)) raise ei.__class__, ei, tb def _get_schema_type(self): return", "= (args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def copy(self): return self._wrap(self)", "return False class TypeBase(object): @classmethod def TransferGlobalValidator(klass, src, dst): if", "n=len(self)+1, clamp=False), rv) else: super(Sequence, self).append(rv) except Exception, e: print(\"das.types.Sequence.pop:", "class GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data = data", "self.__class__(rhs if st is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv", "key=args[0])) self._dict.setdefault(*args) # Override of dict.update def _update(self, *args, **kwargs):", "__getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def __delitem__(self, i): ii", "self).pop(*args) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "(k in self) oldval = (self[k] if wasset else None)", "i def __imul__(self, n): oldlen = len(self) super(Sequence, self).__imul__(n) try:", "tb = sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception,", "*args): rv = super(Sequence, self).pop(*args) try: self._gvalidate() except: ec, ei,", "TypeError(\"_setdefault expected at most 2 arguments, got %d\" % nargs)", "super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__: Failed to", "return self.data def __exit__(self, type, value, traceback): if self.oldstate is", "sys.exc_info() try: # if _k i not defined but a", "__contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return self._dict.__cmp__(oth._dict if", "tb def __iter__(self): for item 
in super(Sequence, self).__iter__(): yield TypeBase.TransferGlobalValidator(self,", "try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.pop: Failed to", "self).__imul__(n) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "type, value, traceback): if self.oldstate is not None: self.data._enable_global_validation(self.oldstate) self.oldstate", "None: n = len(self) ii = i + n if", "ec, ei, tb def pop(self, *args): rv = super(Sequence, self).pop(*args)", "in super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def items(self): return", "(%s)\" % e) raise ec, ei, tb def pop(self, k,", "not None: gvcb() if hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except:", "created # Maybe because tuple is immutable? super(Tuple, self).__init__() def", "def setdefault(self, *args): nargs = len(args) if nargs > 2:", "e) raise ec, ei, tb def copy(self): return self._wrap(self) def", "rv def __rsub__(self, y): return self.__sub__(y) def __ior__(self, y): oldvals", "tb def copy(self): return self._wrap(self) def add(self, e): ae =", "super(Sequence, self).__contains__(_v) # except: # return False def index(self, y):", "on): self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase, tuple): def __init__(self, *args):", "dict.has_key def _has_key(self, k): return self._dict.has_key(self._get_alias(k)) # Override of dict.pop", "ei, tb = sys.exc_info() try: self._dict[k] = oldval except Exception,", "print(\"das.types.Sequence.pop: Failed to recover sequence data (%s)\" % e) raise", "raise ec, ei, tb return item def difference(self, rhs): return", "dict.clear def _clear(self): items = self._dict.items() self._dict.clear() try: self._gvalidate() except:", "[self._adapt_value(x, index=i+k) for k, x in enumerate(y)] super(Sequence, self).__setslice__(i, j,", "__ror__(self, y): return self.__or__(y) def __ixor__(self, y): oldvals = super(Set,", "self.copy() rv ^= y return rv 
def __cmp__(self, oth): #", "in super(Set, self).__iter__(): yield TypeBase.TransferGlobalValidator(self, item) def clear(self): oldvals =", "requirements\" if current_version: fullmsg += \", %s in use\" %", "TypeBase.TransferGlobalValidator(self, v) def values(self): return [x for x in self.itervalues()]", "tb def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] =", "st = self._get_schema_type() if st is not None: n =", "_wrap(self, rhs): st = self._get_schema_type() rv = self.__class__(rhs if st", "here, but at the time we reach # the core", "remove(self, y): idx = self.index(y) item = self[idx] super(Sequence, self).remove(item)", "fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled, self).__init__()", "return self.__and__(rhs) def symmetric_difference(self, rhs): return self.__xor__(rhs) class Dict(TypeBase, dict):", "in self) oldval = (self[k] if wasset else None) super(Dict,", "immutable? 
super(Tuple, self).__init__() def __add__(self, y): raise das.ValidationError(\"Expected a tuple", "self._adapt_key(k) _v = super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate() except:", "except Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict data (%s)\"", "len(newvals), len(self), []) except Exception, e: print(\"das.types.Sequence.extend: Failed to recover", "{} self._update(*args, **kwargs) def __getattr__(self, k): try: k = self._get_alias(k)", "k): _k = k k = self._get_alias(k) oldval = self._dict.get(k,", "else: for k, v in a0: k = self._adapt_key(k) if", "== 2: args = (args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def", "a0: k = self._adapt_key(k) if k in self: oldvals[k] =", "self._dict.__len__() def __str__(self): return self._dict.__str__() def __repr__(self): return self._dict.__repr__() #", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(len(self)", "key=k) elif len(args) > 1: raise Exception(\"update expected at most", "self._wrap(self) def add(self, e): ae = self._adapt_value(e, index=len(self)) if ae", "k, v in oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except Exception, e:", "need to declare *args here, but at the time we", "if k2 in self.__dict__: if self.__dict__[k2] != getattr(self._dict, k): raise", "st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): # Skip global", "class Set(TypeBase, set): def __init__(self, args): TypeBase.__init__(self) set.__init__(self, args) def", "rv) else: super(Sequence, self).append(rv) except Exception, e: print(\"das.types.Sequence.pop: Failed to", "for v in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def values(self):", "#print(\"Forward '%s' to dict class '%s'\" % (k, k)) return", "data (%s)\" % e) raise ec, ei, tb def itervalues(self):", "= 
self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals) except Exception, e:", "super(Sequence, self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate() except: ec, ei, tb", "v) def _values(self): return [x for x in self.itervalues()] def", "def ordered_keys(self): return filter(lambda x: x in self, self._get_schema_type().ordered_keys()) def", "super(Set, self).add(ae) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "args) def __iand__(self, y): oldvals = super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x,", "for k, v in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def", "Struct) else oth) def __eq__(self, oth): return self._dict.__eq__(oth._dict if isinstance(oth,", "len(args) if nargs > 2: raise TypeError(\"_setdefault expected at most", "super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei, tb", "try: super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear: Failed to recover", "self).insert(idx, item) except Exception, e: print(\"das.types.Sequence.remove: Failed to recover sequence", "self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth) def __iter__(self): return self._dict.__iter__()", "current_version: fullmsg += \", %s in use\" % current_version else:", "self[:] rv.__imul__(n) return rv def __rmul__(self, n): return self.__mul__(n) def", "del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__: Failed to recover struct data", "e: print(\"das.types.Sequence.__iadd__: Failed to recover sequence data (%s)\" % e)", "y return rv def __cmp__(self, oth): # base set class", "return self.__and__(y) def __isub__(self, y): oldvals = super(Set, self).copy() super(Set,", "n): oldlen = len(self) super(Sequence, self).__imul__(n) try: self._gvalidate() except: ec,", "{} remvals = set() if len(args) == 1: a0 =", "for k, v in a0: k = self._adapt_key(k) if k", 
"item = super(Set, self).pop() try: self._gvalidate() except: ec, ei, tb", "Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict data (%s)\" %", "schema_type is None: schema_type = self._get_schema_type() if schema_type is not", "if nargs > 2: raise TypeError(\"setdefault expected at most 2", "tb = sys.exc_info() try: for k in remvals: super(Dict, self).__delitem__(k)", "__ge__(self, oth): return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth) def", "set.__init__(self, args) def __iand__(self, y): oldvals = super(Set, self).copy() super(Set,", "of dict.update def _update(self, *args, **kwargs): if len(args) > 1:", "ValidateGlobally(klass, inst): if isinstance(inst, klass): inst._gvalidate() return inst def __init__(self,", "(k, self.__class__.__name__, k2)) return getattr(self, k2) else: #print(\"Forward '%s' to", "_update(self, *args, **kwargs): if len(args) > 1: raise Exception(\"update expected", "self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try:", "import traceback class ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is", "is deprecated, use %s instead\" % (repr(k), repr(aliasname))) # das.print_once(message)", "super(Tuple, self).__init__() def __add__(self, y): raise das.ValidationError(\"Expected a tuple of", "def intersection(self, rhs): return self.__and__(rhs) def symmetric_difference(self, rhs): return self.__xor__(rhs)", "super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate()", "% e) raise ec, ei, tb def pop(self, *args): rv", "rhs): return self.__xor__(rhs) class Dict(TypeBase, dict): def __init__(self, *args, **kwargs):", "raise ec, ei, tb def _get_alias(self, k): st = self._get_schema_type()", "ii else: return i def __imul__(self, n): oldlen = len(self)", "(%s)\" % e) raise ec, ei, tb def __getitem__(self, k):", "def insert(self, i, y): super(Sequence, 
self).insert(i, self._adapt_value(y, index=i)) try: self._gvalidate()", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).pop()", "Exception, e: print(\"das.types.Sequence.pop: Failed to recover sequence data (%s)\" %", "except: pass return self.data def __exit__(self, type, value, traceback): if", "nargs >= 1: self._check_reserved(args[0]) if nargs == 2: args =", "e) raise ec, ei, tb return rv def remove(self, y):", "of dict.copy def _copy(self): return self._wrap(self) # Override of dict.setdefault", "x in enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate() except: ec, ei,", "isinstance(oth, Struct) else oth) def __le__(self, oth): return self._dict.__le__(oth._dict if", "oldvals = super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j) try: self._gvalidate()", "print(\"das.types.Set.__isub__: Failed to recover set data (%s)\" % e) raise", "super(Struct, self).__setattr__(k, v) else: k = self._get_alias(k) self._check_reserved(k) wasset =", "try: k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if", "(%s)\" % e) raise ec, ei, tb def __delattr__(self, k):", "ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except Exception,", "remvals.add(k) self[k] = self._adapt_value(v, key=k) elif len(args) > 1: raise", "self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv) else: super(Sequence, self).append(rv) except Exception, e:", "y): idx = self.index(y) item = self[idx] super(Sequence, self).remove(item) try:", "None: gvcb() if hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except: _,", "__getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class Sequence(TypeBase, list): def", "k2)) return getattr(self, k2) else: #print(\"Forward '%s' to dict class", "oldval except Exception, e: 
print(\"das.types.Struct.__delitem__: Failed to recover struct data", "ec, ei, tb = sys.exc_info() try: if args: super(Sequence, self).insert(self._wrap_index(args[0],", "def _pop(self, k, *args): _k = k k = self._get_alias(k)", "raise ec, ei, tb class Set(TypeBase, set): def __init__(self, args):", "super(Set, self).clear() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "= self._get_validate_globally_cb() if gvcb is not None: gvcb() if hasattr(self,", "n): return self.__mul__(n) def __iadd__(self, y): n = len(self) super(Sequence,", "y): return self.__and__(y) def __isub__(self, y): oldvals = super(Set, self).copy()", "return k def _check_reserved(self, k): if hasattr(self.__class__, k): raise ReservedNameError(k)", "super(Set, self).add(item) except Exception, e: print(\"das.types.Set.pop: Failed to recover set", "except Exception, e: print(\"das.types.Sequence.__delitem__: Failed to recover sequence data (%s)\"", "for an override method of the same name prefixed by", "traceback): if self.oldstate is not None: self.data._enable_global_validation(self.oldstate) self.oldstate = None", "= self._get_alias(k) self._check_reserved(k) wasset = (k in self._dict) oldval =", "Exception, e: print(\"das.types.Dict.clear: Failed to recover dict data (%s)\" %", "\": no requirements\" if current_version: fullmsg += \", %s in", "def __gt__(self, oth): return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth)", "if hasattr(self, \"_is_global_validation_enabled\"): if not self._is_global_validation_enabled(): # Skip global validaton", "ei, tb def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def", "def values(self): return [x for x in self.itervalues()] def iteritems(self):", "ei, tb = sys.exc_info() try: for item in added: super(Set,", "deprecated, use %s instead\" % (repr(k), repr(aliasname))) # das.print_once(message) return", "self).__init__() def __add__(self, y): raise 
das.ValidationError(\"Expected a tuple of size", "= (self._dict[k] if wasset else None) self._dict[k] = self._adapt_value(v, key=k)", "self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] = True def", "validaton return gvcb = self._get_validate_globally_cb() if gvcb is not None:", "self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth) def __lt__(self, oth): return", "k): k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k,", "hasattr(self._dict, k) else \"hasn't\")) return self.__getattribute__(k) def __setattr__(self, k, v):", "valid key super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed", "(%s)\" % e) raise ec, ei, tb # def __contains__(self,", "self.__dict__[k2] = getattr(self._dict, k) def ordered_keys(self): return filter(lambda x: x", "return rv def remove(self, y): idx = self.index(y) item =", "das.ValidationError(\"Global Validation Failed (%s)\" % str(ei)) raise ei.__class__, ei, tb", "self._dict[k] = oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setattr__: Failed", "for # to enable dynamic function set binding if k", "_iteritems(self): for k, v in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v)", "clamp: return 0 else: raise IndexError(\"list index out of range\")", "+ fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled,", "oldval except Exception, e: print(\"das.types.Struct.pop: Failed to recover struct data", "e) raise ec, ei, tb def _get_alias(self, k): st =", "isinstance(oth, Struct) else oth) def __iter__(self): return self._dict.__iter__() def __len__(self):", "y): newvals = [self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)]", "ec, ei, tb # Override of dict.copy def 
_copy(self): return", "__lt__(self, oth): return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth) def", "raise ec, ei, tb # Override of dict.copy def _copy(self):", "st is None else das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k, v):", "self).__setitem__(k, v) except Exception, e: print(\"das.types.Dict.update: Failed to recover dict", "k, *args): _k = k k = self._get_alias(k) oldval =", "return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth) def __le__(self, oth):", "method of the same name prefixed by '_' in current", "a tuple of size %d, got %d\" % (len(self), len(self)", "to modify for # to enable dynamic function set binding", "y): oldvals = super(Sequence, self).__getslice__(i, j) newvals = [self._adapt_value(x, index=i+k)", "expected at most 2 arguments, got %d\" % nargs) if", "else None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei,", "else None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec,", "if hasattr(self._dict, k): # Look for an override method of", "das.adapt_value(key, schema_type=st.ktype)) def __setitem__(self, k, v): k = self._adapt_key(k) wasset", "Override of dict.popitem def _popitem(self): k, v = self._dict.popitem() try:", "return False def index(self, y): return super(Sequence, self).index(self._adapt_value(y, index=0)) def", "super(Dict, self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate() except: ec, ei, tb", "ei, tb = sys.exc_info() try: # if _k i not", "*args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs) def __getattr__(self,", "k) st = self._get_schema_type() if st is not None: n", "= v except Exception, e: print(\"das.types.Struct.popitem: Failed to recover struct", "clamp=False) item = super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i) try: self._gvalidate() except:", "super(Set, 
self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))", "klass): inst._gvalidate() return inst def __init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"]", "hasattr(a0, \"keys\"): for k in a0.keys(): k = self._adapt_key(k) if", "__setattr__(self, k, v): # Special case for __class__ member that", "self.index(y) item = self[idx] super(Sequence, self).remove(item) try: self._gvalidate() except: ec,", "ei, tb return retval # Override of dict.popitem def _popitem(self):", "ec, ei, tb = sys.exc_info() try: super(Dict, self).update(items) except Exception,", "current class k2 = '_' + k if hasattr(self, k2):", "def __rmul__(self, n): return self.__mul__(n) def __iadd__(self, y): n =", "<= len(oth): return -1 else: return 1 def __iter__(self): for", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).insert(ii, item) except", "same name prefixed by '_' in current class k2 =", "= self._adapt_key(k) if k in self: if not k in", "sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals)", "e: print(\"das.types.Dict.update: Failed to recover dict data (%s)\" % e)", "fullmsg += \", %s in use\" % current_version else: fullmsg", "try: self._dict.clear() self._dict.update(oldvals) except Exception, e: print(\"das.types.Struct.update: Failed to recover", ">= 1: self._check_reserved(args[0]) if nargs == 2: args = (args[0],", "in self: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(a0[k],", "= super(Set, self).copy() super(Set, self).clear() try: self._gvalidate() except: ec, ei,", "self.itervalues()] def iteritems(self): for k, v in super(Dict, self).iteritems(): yield", "if hasattr(self._dict, k) else \"hasn't\")) return self.__getattribute__(k) def __setattr__(self, k,", "rv def __cmp__(self, oth): # base set class doesn't implement", "args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), 
rv) else: super(Sequence, self).append(rv) except", "name) class VersionError(Exception): def __init__(self, msg=None, current_version=None, required_version=None): fullmsg =", "# try: # _v = self._adapt_value(y, index=0) # return super(Sequence,", "index=len(self)+i) for i, x in enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate()", "except: ec, ei, tb = sys.exc_info() try: super(Set, self).add(item) except", "0: if clamp: return 0 else: raise IndexError(\"list index out", "__setitem__(self, k, v): k = self._adapt_key(k) wasset = (k in", "def popitem(self): item = super(Dict, self).popitem() try: self._gvalidate() except: ec,", "oldvals = self._dict.copy() try: if len(args) == 1: a0 =", "self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__:", "= sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals),", "def __eq__(self, oth): return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth)", "None) retval = self._dict.pop(k, *args) try: self._gvalidate() except: ec, ei,", "return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return self._dict.__cmp__(oth._dict if isinstance(oth, Struct)", "but we need it for some other purpose if len(self.symmetric_difference(oth))", "= super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i, j) try: self._gvalidate() except:", "dict data (%s)\" % e) raise ec, ei, tb def", "except: # return False def index(self, y): return super(Sequence, self).index(self._adapt_value(y,", "has no attribute '%s' (dict %s)\" % (k, \"has\" if", "oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence data", "if isinstance(inst, klass): inst._gvalidate() return inst def __init__(self, *args): super(TypeBase,", "# Note: we can reach here only if k was", "except: ec, ei, tb = sys.exc_info() 
try: super(Dict, self).__setitem__(item[0], item[1])", "def __contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return self._dict.__cmp__(oth._dict", "% e) raise ec, ei, tb def extend(self, y): newvals", "len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)]) try:", "for k in a0.keys(): k = self._adapt_key(k) if k in", "(%s)\" % e) raise ec, ei, tb def itervalues(self): for", "super(Dict, self).__setitem__(k, oldval) else: del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__: Failed", "self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei, tb =", "key super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed to", "+= \", %s in use\" % current_version else: fullmsg +=", "\"hasn't\")) return self.__getattribute__(k) def __setattr__(self, k, v): # Special case", "rv.__iadd__(y) return rv def __setitem__(self, i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y,", "enumerate(y)] for item in lst: if item in self: continue", "self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__: Failed to recover", "oth): return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth) def __gt__(self,", "rhs): st = self._get_schema_type() rv = self.__class__(rhs if st is", "Override of dict.pop def _pop(self, k, *args): _k = k", "oldval = self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate() except: ec, ei,", "x in self, self._get_schema_type().ordered_keys()) def _itervalues(self): for v in self._dict.itervalues():", "2: args = (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override of", "< 0: if clamp: return 0 else: raise IndexError(\"list index", "raise ec, ei, tb def __iter__(self): for item in super(Sequence,", "key=k) else: for k, v in a0: k = self._adapt_key(k)", "def 
__imul__(self, n): oldlen = len(self) super(Sequence, self).__imul__(n) try: self._gvalidate()", "k): if hasattr(self.__class__, k): raise ReservedNameError(k) elif hasattr(self._dict, k): k2", "hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except: _, ei, tb =", "None self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self, rhs): st = self._get_schema_type()", "set() if len(args) == 1: a0 = args[0] if hasattr(a0,", "rhs): return self.__sub__(rhs) def union(self, rhs): return self.__or__(rhs) def intersection(self,", "*args, **kwargs): TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def _adapt_key(self, key): st", "tb def pop(self, *args): rv = super(Sequence, self).pop(*args) try: self._gvalidate()", "rv def __setitem__(self, i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate()", "args = (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override of dict.update", "def update(self, *args): added = set() for y in args:", "^= y return rv def __rxor__(self, y): rv = self.copy()", "return -1 else: return 1 def __iter__(self): for item in", "return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def _validate(self, schema_type=None): if schema_type", "y): n = len(self) super(Sequence, self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except:", "(type(self).__name__, k, k, k) st = self._get_schema_type() if st is", "return gvcb = self._get_validate_globally_cb() if gvcb is not None: gvcb()", "self._get_alias(k) self._check_reserved(k) wasset = (k in self._dict) oldval = (self._dict[k]", "elif len(args) > 1: raise Exception(\"update expected at most 1", "TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args, **kwargs) def __getattr__(self, k): try:", "ec, ei, tb def __getslice__(self, i, j): return self._wrap(super(Sequence, self).__getslice__(i,", "need to check if _k was 
a valid key super(Dict,", "instead\" % (type(self).__name__, k, k, k) st = self._get_schema_type() if", "self.copy() rv |= y return rv def __ror__(self, y): return", "self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth) def __eq__(self, oth): return", "y): return self.__sub__(y) def __ior__(self, y): oldvals = super(Set, self).copy()", "_k was a valid key super(Dict, self).__setitem__(_k, _v) except Exception,", "def add(self, e): ae = self._adapt_value(e, index=len(self)) if ae in", "oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(a0[k], key=k) else:", "__delitem__(self, i): ii = self._wrap_index(i, clamp=False) item = super(Sequence, self).__getitem__(ii)", "ei, tb return self def __sub__(self, y): rv = self.copy()", "self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else", "self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__isub__: Failed to recover set data", "self).__contains__(_v) # except: # return False def index(self, y): return", "modify for # to enable dynamic function set binding if", "self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict", "index(self, y): return super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self, i, y):", "def iteritems(self): for k, v in super(Dict, self).iteritems(): yield k,", "% current_version else: fullmsg += \", no version info\" if", "j) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "= self._get_schema_type() if st is not None and st.has_key(k): aliasname", "try: super(Sequence, self).insert(ii, item) except Exception, e: print(\"das.types.Sequence.__delitem__: Failed to", "else: fullmsg += \": no requirements\" if current_version: fullmsg +=", "= sys.exc_info() try: super(Sequence, self).insert(idx, item) except Exception, e: print(\"das.types.Sequence.remove:", "ec, ei, tb = sys.exc_info() try: 
super(Set, self).add(item) except Exception,", "oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__: Failed to recover", "len(self.symmetric_difference(oth)) == 0: return 0 elif len(self) <= len(oth): return", "if len(args) == 1: a0 = args[0] if hasattr(a0, \"keys\"):", "kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) self._gvalidate()", "j): return self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self, i, j): oldvals", "% e) raise ec, ei, tb def _get_alias(self, k): st", "= sys.exc_info() try: for item in added: super(Set, self).remove(item) except", "v in oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except Exception, e: print(\"das.types.Dict.update:", "self._dict.__getitem__(k)) def __setitem__(self, k, v): k = self._get_alias(k) self._check_reserved(k) wasset", "sys.exc_info() try: for item in added: super(Set, self).remove(item) except Exception,", "super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)])) try: self._gvalidate()", "key=k)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "getattr(self, k2) else: #print(\"Forward '%s' to dict class '%s'\" %", "__and__(self, y): rv = self.copy() rv &= y return rv", "ei, tb return self def __add__(self, y): rv = self[:]", "to dict class '%s'\" % (k, k)) return getattr(self._dict, k)", "TypeBase(object): @classmethod def TransferGlobalValidator(klass, src, dst): if isinstance(src, klass) and", "= self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except Exception, e:", "'_' + k if hasattr(self, k2): #print(\"Forward '%s' to %s", "Exception, e: print(\"das.types.Set.__isub__: Failed to recover set data (%s)\" %", "print(\"das.types.Struct.clear: Failed to recover struct data (%s)\" % e) raise", "k, v in kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] =", 
"self._dict.setdefault(*args) # Override of dict.update def _update(self, *args, **kwargs): if", "nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) #", "e: print(\"das.types.Sequence.__delitem__: Failed to recover sequence data (%s)\" % e)", "= self._get_schema_type() if st is not None: # run self", "def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k))) def __delitem__(self, k):", "some other purpose if len(self.symmetric_difference(oth)) == 0: return 0 elif", "tb = sys.exc_info() try: super(Sequence, self).insert(ii, item) except Exception, e:", "tb = sys.exc_info() try: super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), [])", "= True def _wrap(self, rhs): st = self._get_schema_type() rv =", "_k = k k = self._get_alias(k) oldval = self._dict.get(k, None)", "oth): return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth) def __ge__(self,", "ei, tb return self def __and__(self, y): rv = self.copy()", "try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict[k]", "ei, tb def __contains__(self, k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth):", "set() for y in args: lst = [self._adapt_value(x, index=i) for", "TypeBase.TransferGlobalValidator(self, v) def items(self): return [x for x in self.iteritems()]", "oldvals = super(Sequence, self).__getslice__(i, j) newvals = [self._adapt_value(x, index=i+k) for", "+ len(y))) def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i)) class", "__delitem__(self, k): _k = k k = self._get_alias(k) oldval =", "k in a0.keys(): k = self._adapt_key(k) if k in self:", "self).add(item) added.add(item) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "[self._adapt_value(x, index=i) for i, x in enumerate(y)] for item in", "ei, tb return _v def popitem(self): item = super(Dict, 
self).popitem()", "inst def __init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"]", "super(GlobalValidationDisabled, self).__init__() self.data = data self.oldstate = None def __enter__(self):", "ec, ei, tb = sys.exc_info() try: for item in added:", "self[k] = self._adapt_value(v, key=k) elif len(args) > 1: raise Exception(\"update", "ei, tb = sys.exc_info() try: super(Set, self).remove(ae) except Exception, e:", "reserved name\" % name) class VersionError(Exception): def __init__(self, msg=None, current_version=None,", "ec, ei, tb = sys.exc_info() try: super(Set, self).__ior__(oldvals) except Exception,", "(%s)\" % e) raise ec, ei, tb def append(self, y):", "def __init__(self, args): TypeBase.__init__(self) set.__init__(self, args) def __iand__(self, y): oldvals", "dict.copy def _copy(self): return self._wrap(self) # Override of dict.setdefault def", "None and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if aliasname is not", "= (k in self) oldval = (self[k] if wasset else", "(args[0], self._adapt_value(args[1], key=args[0])) super(Dict, self).setdefault(*args) def copy(self): return self._wrap(self) def", "k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k, v):", "= self[idx] super(Sequence, self).remove(item) try: self._gvalidate() except: ec, ei, tb", "\"keys\"): for k in a0.keys(): k = self._get_alias(k) self._check_reserved(k) self._dict[k]", "super(Set, self).pop() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self).__delitem__(i) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) for k, v in kwargs.iteritems():", "return rv def __rsub__(self, y): return self.__sub__(y) def __ior__(self, y):", "of the method, tuple is already created # Maybe because", "_v = super(Dict, self).pop(_k, *args) try: 
self._gvalidate() except: ec, ei,", "*args): nargs = len(args) if nargs > 2: raise TypeError(\"setdefault", "ei, tb def __iter__(self): for item in super(Sequence, self).__iter__(): yield", "version info\" if msg: fullmsg = msg + \" v\"", "*args here, but at the time we reach # the", "in self: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v,", "ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self), []) except", "in lst: if item in self: continue super(Set, self).add(item) added.add(item)", "remvals.add(k) self[k] = self._adapt_value(a0[k], key=k) else: for k, v in", "self).__setitem__(item[0], item[1]) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover dict", "is already created # Maybe because tuple is immutable? super(Tuple,", "= oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__: Failed to", "__delitem__(k) would fail) try: self._dict[k] = oldval except Exception, e:", "self.copy() rv &= y return rv def __rand__(self, y): return", "ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(len(self) - len(newvals),", "in self._dict) oldval = (self._dict[k] if wasset else None) self._dict[k]", "e) raise ec, ei, tb def __iter__(self): for item in", "print(\"das.types.Dict.update: Failed to recover dict data (%s)\" % e) raise", "wasset else None) self._dict[k] = self._adapt_value(v, key=k) try: self._gvalidate() except:", "__delslice__(self, i, j): oldvals = super(Sequence, self).__getslice__(i, j) super(Sequence, self).__delslice__(i,", "self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb):", "k k = self._get_alias(k) oldval = self._dict.get(k, None) retval =", "super(Sequence, self).__setslice__(i, j, newvals) try: self._gvalidate() except: ec, ei, tb", "fullmsg = \"ersion error\" if required_version: fullmsg += \": %s", "try: if 
args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv) else: super(Sequence,", "super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def values(self): return [x for", "% (k, self.__class__.__name__, k2)) return getattr(self, k2) else: #print(\"Forward '%s'", "if gvcb is not None: gvcb() if hasattr(self, \"_validate_globally\"): try:", "an override method of the same name prefixed by '_'", "copy(self): return self._wrap(self) def add(self, e): ae = self._adapt_value(e, index=len(self))", "_is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on class", "def __and__(self, y): rv = self.copy() rv &= y return", "v in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def values(self): return", "(\"[das] Field %s is deprecated, use %s instead\" % (repr(k),", "self._gvalidate() def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i)) def __delitem__(self,", "super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] =", "!= getattr(self._dict, k): raise ReservedNameError(k) else: msg = \"[das] %s's", "ei = das.ValidationError(\"Global Validation Failed (%s)\" % str(ei)) raise ei.__class__,", "no need to check if _k was a valid key", "# to enable dynamic function set binding if k ==", "most 2 arguments, got %d\" % nargs) if nargs ==", "index=n)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "sys.exc_info() try: super(Sequence, self).__setslice__(oldlen, len(self), []) except Exception, e: print(\"das.types.Sequence.__imul__:", "+ \" v\" + fullmsg else: fullmsg = \"V\" +", "def __setslice__(self, i, j, y): oldvals = super(Sequence, self).__getslice__(i, j)", "def __ior__(self, y): 
oldvals = super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i)", "ec, ei, tb def itervalues(self): for v in super(Dict, self).itervalues():", "% e) raise ec, ei, tb def __getitem__(self, k): k", "attribute (set __getattr__) return if k2 in self.__dict__: if self.__dict__[k2]", "for k, v in oldvals.iteritems(): super(Dict, self).__setitem__(k, v) except Exception,", "fullmsg = \"V\" + fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object): def", "for i, x in enumerate(y)] for item in lst: if", "*args): added = set() for y in args: lst =", "v): k = self._adapt_key(k) wasset = (k in self) oldval", "self.copy() rv -= y return rv def __rsub__(self, y): return", "rv &= y return rv def __rand__(self, y): return self.__and__(y)", "k, v in super(Dict, self).iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def", "self).update(items) except Exception, e: print(\"das.types.Dict.clear: Failed to recover dict data", "ei, tb # def __contains__(self, k): # try: # _k", "self._adapt_key(k) # return super(Dict, self).__contains__(_k) # except: # return False", "das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def _validate(self, schema_type=None): if schema_type is", "def ValidateGlobally(klass, inst): if isinstance(inst, klass): inst._gvalidate() return inst def", "self).__delitem__(_k) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "= self[:] rv.__imul__(n) return rv def __rmul__(self, n): return self.__mul__(n)", "__cmp__ # but we need it for some other purpose", "ei, tb = sys.exc_info() try: super(Set, self).add(item) except Exception, e:", "try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals) except", "retval = self._dict.pop(k, *args) try: self._gvalidate() except: ec, ei, tb", "self).__getitem__(_k) super(Dict, self).__delitem__(_k) try: self._gvalidate() except: ec, ei, tb =", "nargs > 2: 
raise TypeError(\"_setdefault expected at most 2 arguments,", "self).__getitem__(i)) def __delitem__(self, i): ii = self._wrap_index(i, clamp=False) item =", "defined but a default value is provided, we should not", "rv = self.copy() rv ^= y return rv def __rxor__(self,", "self.__sub__(rhs) def union(self, rhs): return self.__or__(rhs) def intersection(self, rhs): return", "data (%s)\" % e) raise ec, ei, tb def update(self,", "< 0: if n is None: n = len(self) ii", "enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate() except: ec, ei, tb =", "try: for item in added: super(Set, self).remove(item) except Exception, e:", "raise ec, ei, tb return self def __xor__(self, y): rv", "added = set() for y in args: lst = [self._adapt_value(x,", "ei, tb def pop(self): item = super(Set, self).pop() try: self._gvalidate()", "print(\"das.types.Set.__iand__: Failed to recover set data (%s)\" % e) raise", "'%s' (dict %s)\" % (k, \"has\" if hasattr(self._dict, k) else", "except Exception, e: print(\"das.types.Struct.pop: Failed to recover struct data (%s)\"", "self._adapt_value(e, index=len(self)) if ae in self: return super(Set, self).add(ae) try:", "self._dict) oldval = (self._dict[k] if wasset else None) self._dict[k] =", "(self._dict[k] if wasset else None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate()", "None) self._dict[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei,", "self).__delslice__(i, j) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "= oldval except Exception, e: print(\"das.types.Struct.__delattr__: Failed to recover struct", "if isinstance(oth, Struct) else oth) def __eq__(self, oth): return self._dict.__eq__(oth._dict", "self._check_reserved(args[0]) if nargs == 2: args = (args[0], self._adapt_value(args[1], key=args[0]))", "v) def values(self): return [x for x in self.itervalues()] def", "= sys.exc_info() ei = das.ValidationError(\"Global Validation Failed (%s)\" % str(ei))", 
"def _adapt_value(self, value, key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index)", "def _update(self, *args, **kwargs): if len(args) > 1: raise Exception(\"update", "if args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv) else: super(Sequence, self).append(rv)", "self._dict.has_key(self._get_alias(k)) # Override of dict.pop def _pop(self, k, *args): _k", "tb return retval # Override of dict.popitem def _popitem(self): k,", "\"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except: _, ei, tb = sys.exc_info()", "(%s)\" % e) raise ec, ei, tb def __contains__(self, k):", "(k, \"has\" if hasattr(self._dict, k) else \"hasn't\")) return self.__getattribute__(k) def", "super(Dict, self).__setitem__(item[0], item[1]) except Exception, e: print(\"das.types.Dict.popitem: Failed to recover", "ordered_keys(self): return filter(lambda x: x in self, self._get_schema_type().ordered_keys()) def _itervalues(self):", "except: ec, ei, tb = sys.exc_info() try: super(Dict, self).__setitem__(_k, _v)", "except: ec, ei, tb = sys.exc_info() try: super(Dict, self).update(items) except", "def _iteritems(self): for k, v in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self,", "self._get_schema_type() if st is not None: # run self validation", "raise ec, ei, tb def pop(self, *args): rv = super(Sequence,", "ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self), []) except", "else: return ii else: return i def __imul__(self, n): oldlen", "VersionError(Exception): def __init__(self, msg=None, current_version=None, required_version=None): fullmsg = \"ersion error\"", "item def difference(self, rhs): return self.__sub__(rhs) def union(self, rhs): return", "k, v in kwargs.iteritems(): k = self._adapt_key(k) if k in", "index=0)) def insert(self, i, y): super(Sequence, self).insert(i, self._adapt_value(y, index=i)) try:", "and 
isinstance(dst, klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def ValidateGlobally(klass, inst):", "not None and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if aliasname is", "self._get_schema_type() if st is not None: n = das.get_schema_type_name(st) if", "= (args[0], self._adapt_value(args[1], key=args[0])) self._dict.setdefault(*args) # Override of dict.update def", "e) raise ec, ei, tb def update(self, *args): added =", "in enumerate(y)] super(Sequence, self).extend(newvals) try: self._gvalidate() except: ec, ei, tb", "in enumerate(y)])) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "e: print(\"das.types.Set.pop: Failed to recover set data (%s)\" % e)", "info\" if msg: fullmsg = msg + \" v\" +", "len(args) > 1: raise Exception(\"update expected at most 1 arguments,", "expected at most 1 arguments, got %d\" % len(args)) for", "= self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k)) def __setitem__(self, k, v): k", "exception return False class TypeBase(object): @classmethod def TransferGlobalValidator(klass, src, dst):", "Struct) else oth) def __le__(self, oth): return self._dict.__le__(oth._dict if isinstance(oth,", "x in enumerate(y)] super(Sequence, self).__setslice__(i, j, newvals) try: self._gvalidate() except:", "isinstance(inst, klass): inst._gvalidate() return inst def __init__(self, *args): super(TypeBase, self).__init__()", "try: super(Sequence, self).__setslice__(oldlen, len(self), []) except Exception, e: print(\"das.types.Sequence.__imul__: Failed", "self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth) def __le__(self, oth): return", "hasattr(self, k2): #print(\"Forward '%s' to %s class '%s'\" % (k,", "not None: schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st = self._get_schema_type() if", "return self def __xor__(self, y): rv = self.copy() rv ^=", "__class__ member that we may want to modify for 
#", "print(\"das.types.Sequence.append: Failed to recover sequence data (%s)\" % e) raise", "raise ec, ei, tb def __delitem__(self, k): _k = k", "in current class k2 = '_' + k if hasattr(self,", "Failed to recover set data (%s)\" % e) raise ec,", "in args: lst = [self._adapt_value(x, index=i) for i, x in", "1: a0 = args[0] if hasattr(a0, \"keys\"): for k in", "return ii else: return i def __imul__(self, n): oldlen =", "item = self[idx] super(Sequence, self).remove(item) try: self._gvalidate() except: ec, ei,", "return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth) def __gt__(self, oth):", "i < 0: if n is None: n = len(self)", "except Exception, e: print(\"das.types.Dict.__setitem__: Failed to recover dict data (%s)\"", "print(\"das.types.Sequence.__delitem__: Failed to recover sequence data (%s)\" % e) raise", "try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__: Failed", "tb = sys.exc_info() try: if wasset: self._dict[k] = oldval else:", "self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__iand__: Failed to recover", "data (%s)\" % e) raise ec, ei, tb return self", "self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delitem__: Failed to recover", "the core of the method, tuple is already created #", "= \"[das] %s's '%s(...)' method conflicts with data field '%s',", "return self._dict.has_key(self._get_alias(k)) # Override of dict.pop def _pop(self, k, *args):", "return i def __imul__(self, n): oldlen = len(self) super(Sequence, self).__imul__(n)", "data (%s)\" % e) raise ec, ei, tb def pop(self):", "sys.exc_info() try: super(Set, self).add(item) except Exception, e: print(\"das.types.Set.pop: Failed to", "rv = self.copy() rv ^= y return rv def __cmp__(self,", "# except: # return False def setdefault(self, *args): nargs =", "klass): dst._set_validate_globally_cb(src._gvalidate) return dst @classmethod def 
ValidateGlobally(klass, inst): if isinstance(inst,", "*args): _k = self._adapt_key(k) _v = super(Dict, self).pop(_k, *args) try:", "% e) raise ec, ei, tb return self def __and__(self,", "(k, k)) return getattr(self._dict, k) else: #raise AttributeError(\"'Struct' has no", "return self._wrap(self) def update(self, *args, **kwargs): oldvals = {} remvals", "self._dict.items() self._dict.clear() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "but a default value is provided, we should not reach", "self._dict.popitem() try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "no version info\" if msg: fullmsg = msg + \"", "got %d\" % (len(self), len(self) + len(y))) def __getitem__(self, i):", "sys.exc_info() try: super(Set, self).remove(ae) except Exception, e: print(\"das.types.Set.add: Failed to", "tb def __delitem__(self, k): _k = k k = self._get_alias(k)", "return self._dict.__str__() def __repr__(self): return self._dict.__repr__() # Override of dict.has_key", "# Override of dict.clear def _clear(self): items = self._dict.items() self._dict.clear()", "inst): if isinstance(inst, klass): inst._gvalidate() return inst def __init__(self, *args):", "in added: super(Set, self).remove(item) except Exception, e: print(\"das.types.Set.update: Failed to", "data self.oldstate = None def __enter__(self): try: self.oldstate = self.data._is_global_validation_enabled()", "self).popitem() try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "e) raise ec, ei, tb # def __contains__(self, k): #", "if ii < 0: if clamp: return 0 else: raise", "raise ec, ei, tb def itervalues(self): for v in super(Dict,", "in self.__dict__: if self.__dict__[k2] != getattr(self._dict, k): raise ReservedNameError(k) else:", "Failed to recover sequence data (%s)\" % e) raise ec,", "setdefault(self, *args): nargs = len(args) if nargs > 2: raise", "raise ec, ei, tb # def __contains__(self, y): # try:", "ei, tb = sys.exc_info() try: self._dict.clear() 
self._dict.update(oldvals) except Exception, e:", "else: fullmsg = \"V\" + fullmsg super(VersionError, self).__init__(fullmsg) class GlobalValidationDisabled(object):", "len(self) super(Sequence, self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except: ec, ei, tb", "ec, ei, tb def __getitem__(self, k): k = self._get_alias(k) return", "len(args) if nargs > 2: raise TypeError(\"setdefault expected at most", "k = self._get_alias(k) self._check_reserved(k) wasset = (k in self._dict) oldval", "Exception, e: print(\"das.types.Struct.__setattr__: Failed to recover struct data (%s)\" %", "Failed to recover dict data (%s)\" % e) raise ec,", "raise ec, ei, tb return _v def popitem(self): item =", "__iadd__(self, y): n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i,", "except Exception, e: print(\"das.types.Sequence.extend: Failed to recover sequence data (%s)\"", "%s instead\" % (repr(k), repr(aliasname))) # das.print_once(message) return aliasname return", "[]) except Exception, e: print(\"das.types.Sequence.__iadd__: Failed to recover sequence data", "in oldvals: oldvals[k] = self[k] else: remvals.add(k) self[k] = self._adapt_value(v,", "tb return rv def remove(self, y): idx = self.index(y) item", "ei, tb = sys.exc_info() try: super(Sequence, self).insert(idx, item) except Exception,", "print(\"das.types.Set.clear: Failed to recover set data (%s)\" % e) raise", "nargs) if nargs >= 1: self._check_reserved(args[0]) if nargs == 2:", "if wasset: self._dict[k] = oldval else: del(self._dict[k]) except Exception, e:", "k, *args): _k = self._adapt_key(k) _v = super(Dict, self).pop(_k, *args)", "data (%s)\" % e) raise ec, ei, tb class Set(TypeBase,", "(set __getattr__) return if k2 in self.__dict__: if self.__dict__[k2] !=", "return super(Set, self).add(ae) try: self._gvalidate() except: ec, ei, tb =", "self[k] = self._adapt_value(a0[k], key=k) else: for k, v in a0:", "self def __sub__(self, y): rv = self.copy() rv -= y", 
"self.__getattribute__(k) def __setattr__(self, k, v): # Special case for __class__", "item) def clear(self): oldvals = super(Set, self).copy() super(Set, self).clear() try:", "# Always re-raise exception return False class TypeBase(object): @classmethod def", "a0.keys(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k], key=k) else:", "return self._dict.__len__() def __str__(self): return self._dict.__str__() def __repr__(self): return self._dict.__repr__()", "return self.__or__(rhs) def intersection(self, rhs): return self.__and__(rhs) def symmetric_difference(self, rhs):", "oth) def __lt__(self, oth): return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else", "v in kwargs.iteritems(): k = self._adapt_key(k) if k in self:", "dict is actually unchanged # -> no need to check", "msg: fullmsg = msg + \" v\" + fullmsg else:", "self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self, i, j): oldvals = super(Sequence,", "aliasname return k def _check_reserved(self, k): if hasattr(self.__class__, k): raise", "ec, ei, tb = sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals)", "no attribute '%s' (dict %s)\" % (k, \"has\" if hasattr(self._dict,", "added: super(Set, self).remove(item) except Exception, e: print(\"das.types.Set.update: Failed to recover", "[x for x in self.itervalues()] def _iteritems(self): for k, v", "oldvals = super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x", "except Exception, e: print(\"das.types.Struct.clear: Failed to recover struct data (%s)\"", "raise das.ValidationError(\"Expected a tuple of size %d, got %d\" %", "ii < 0: if clamp: return 0 else: raise IndexError(\"list", "= oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setattr__: Failed to", "tb = sys.exc_info() try: super(Sequence, self).insert(idx, item) except Exception, e:", "if len(self.symmetric_difference(oth)) == 0: 
return 0 elif len(self) <= len(oth):", "for k, v in kwargs.iteritems(): k = self._adapt_key(k) if k", "self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) self._gvalidate() except: ec, ei, tb", "oldvals = super(Set, self).copy() super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x", "most 1 arguments, got %d\" % len(args)) for k, v", "self.__or__(rhs) def intersection(self, rhs): return self.__and__(rhs) def symmetric_difference(self, rhs): return", "fail) try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delattr__: Failed", "%s's '%s(...)' method conflicts with data field '%s', use '_%s(...)'", "k) else \"hasn't\")) return self.__getattribute__(k) def __setattr__(self, k, v): #", "try: # if _k i not defined but a default", "e) raise ec, ei, tb # Override of dict.copy def", "purpose if len(self.symmetric_difference(oth)) == 0: return 0 elif len(self) <=", "to recover struct data (%s)\" % e) raise ec, ei,", "return False def setdefault(self, *args): nargs = len(args) if nargs", "super(Sequence, self).append(rv) except Exception, e: print(\"das.types.Sequence.pop: Failed to recover sequence", "rv = self.__class__(rhs if st is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type())", "= super(Sequence, self).__getslice__(i, j) newvals = [self._adapt_value(x, index=i+k) for k,", "= sys.exc_info() try: if wasset: self._dict[k] = oldval else: del(self._dict[k])", "Override of dict.clear def _clear(self): items = self._dict.items() self._dict.clear() try:", "self).__init__(fullmsg) class GlobalValidationDisabled(object): def __init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data =", "k in a0.keys(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(a0[k],", "False def index(self, y): return super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self,", "doesn't implement __cmp__ # but we need it for some", "message = 
(\"[das] Field %s is deprecated, use %s instead\"", "arguments, got %d\" % len(args)) for k, v in kwargs.iteritems():", "__getslice__(self, i, j): return self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self, i,", "return inst def __init__(self, *args): super(TypeBase, self).__init__() self.__dict__[\"_schema_type\"] = None", "return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self):", "__contains__(self, y): # try: # _v = self._adapt_value(y, index=0) #", "(%s)\" % e) raise ec, ei, tb return rv def", "clear(self): items = super(Dict, self).items() super(Dict, self).clear() try: self._gvalidate() except:", "ei, tb # def __contains__(self, y): # try: # _v", "= sys.exc_info() try: super(Dict, self).__setitem__(item[0], item[1]) except Exception, e: print(\"das.types.Dict.popitem:", "= self._adapt_key(k) # return super(Dict, self).__contains__(_k) # except: # return", "= self._dict.copy() try: if len(args) == 1: a0 = args[0]", "for i, x in enumerate(y)]) try: self._gvalidate() except: ec, ei,", "as dict is actually unchanged # -> no need to", "= self._dict.get(k, None) self._dict.__delitem__(k) try: self._gvalidate() except: ec, ei, tb", "but at the time we reach # the core of", "sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception, e: print(\"das.types.Sequence.insert:", "ec, ei, tb def __delitem__(self, k): _k = k k", "(%s)\" % e) raise ec, ei, tb return retval #", "def _clear(self): items = self._dict.items() self._dict.clear() try: self._gvalidate() except: ec,", "= None self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self,", "raise ec, ei, tb def __getitem__(self, k): return TypeBase.TransferGlobalValidator(self, super(Dict,", "in self.itervalues()] def _iteritems(self): for k, v in self._dict.iteritems(): 
yield", "ec, ei, tb def extend(self, y): newvals = [self._adapt_value(x, index=len(self)+i)", "if not k in oldvals: oldvals[k] = self[k] else: remvals.add(k)", "else: msg = \"[das] %s's '%s(...)' method conflicts with data", "ec, ei, tb def pop(self, k, *args): _k = self._adapt_key(k)", "tb = sys.exc_info() try: super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear:", "super(Set, self).copy() super(Set, self).clear() try: self._gvalidate() except: ec, ei, tb", "set data (%s)\" % e) raise ec, ei, tb def", "cb def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] =", "= self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if hasattr(self._dict, k):", "a default value is provided, we should not reach here", "ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii, ii, oldvals) except Exception,", "required\" % required_version else: fullmsg += \": no requirements\" if", "super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ior__: Failed to", "% e) raise ec, ei, tb return rv def remove(self,", "return 0 elif len(self) <= len(oth): return -1 else: return", "raise ec, ei, tb return retval # Override of dict.popitem", "required_version=None): fullmsg = \"ersion error\" if required_version: fullmsg += \":", "% e) raise ec, ei, tb return item def difference(self,", "e) raise ec, ei, tb return self def __mul__(self, n):", "**kwargs): if len(args) > 1: raise Exception(\"update expected at most", "2: raise TypeError(\"_setdefault expected at most 2 arguments, got %d\"", "elif len(self) <= len(oth): return -1 else: return 1 def", "self).__setitem__(k, oldval) else: del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__: Failed to", "else: del(self._dict[k]) except Exception, e: 
print(\"das.types.Struct.__setattr__: Failed to recover struct", "= len(self) ii = i + n if ii <", "self._get_schema_type() return (key if st is None else das.adapt_value(key, schema_type=st.ktype))", "= on class Tuple(TypeBase, tuple): def __init__(self, *args): # Funny,", "j)) def __delslice__(self, i, j): oldvals = super(Sequence, self).__getslice__(i, j)", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: for k", "oldvals = super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x", "tb = sys.exc_info() try: # if _k i not defined", "self[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec, ei, tb", "dynamic function set binding if k == \"__class__\": super(Struct, self).__setattr__(k,", "oldvals = {} remvals = set() if len(args) == 1:", "def __delattr__(self, k): k = self._get_alias(k) oldval = self._dict.get(k, None)", "gvcb() if hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")() except: _, ei,", "self._update(*args, **kwargs) def __getattr__(self, k): try: k = self._get_alias(k) return", "self._adapt_value(v, key=k) self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "e: print(\"das.types.Set.add: Failed to recover set data (%s)\" % e)", "y): rv = self.copy() rv ^= y return rv def", "oldvals = super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x", "i, j): return self._wrap(super(Sequence, self).__getslice__(i, j)) def __delslice__(self, i, j):", "instead\" % (repr(k), repr(aliasname))) # das.print_once(message) return aliasname return k", "__iter__(self): return self._dict.__iter__() def __len__(self): return self._dict.__len__() def __str__(self): return", "<filename>python/das/types.py import sys import das import traceback class ReservedNameError(Exception): def", "self[k] else: remvals.add(k) self[k] = self._adapt_value(v, key=k) try: self._gvalidate() except:", "= getattr(self._dict, k) def ordered_keys(self): 
return filter(lambda x: x in", "y): return self.__or__(y) def __ixor__(self, y): oldvals = super(Set, self).copy()", "len(args)) for k, v in kwargs.iteritems(): k = self._adapt_key(k) if", "__getattr__(self, k): try: k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except", "self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self, cb): self.__dict__[\"_validate_globally_cb\"] = cb def _is_global_validation_enabled(self): return", "in a0.keys(): k = self._adapt_key(k) if k in self: oldvals[k]", "= self.copy() rv |= y return rv def __ror__(self, y):", "= sys.exc_info() try: if args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv)", "# Override of dict.popitem def _popitem(self): k, v = self._dict.popitem()", "except Exception, e: print(\"das.types.Set.clear: Failed to recover set data (%s)\"", "return super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self, i, y): super(Sequence, self).insert(i,", "e: print(\"das.types.Set.__ixor__: Failed to recover set data (%s)\" % e)", "ec, ei, tb def __delattr__(self, k): k = self._get_alias(k) oldval", "Set(TypeBase, set): def __init__(self, args): TypeBase.__init__(self) set.__init__(self, args) def __iand__(self,", "ii = self._wrap_index(i, clamp=False) item = super(Sequence, self).__getitem__(ii) super(Sequence, self).__delitem__(i)", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: if args:", "try: for k in remvals: super(Dict, self).__delitem__(k) for k, v", "sys.exc_info() try: if args: super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv) else:", "self).clear() super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.__ixor__: Failed to recover", "not None: # run self validation first (container validation) st._validate_self(self)", "oth): return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth) def __iter__(self):", "is 
immutable? super(Tuple, self).__init__() def __add__(self, y): raise das.ValidationError(\"Expected a", "n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in", "0: if n is None: n = len(self) ii =", "TypeBase.__init__(self) set.__init__(self, args) def __iand__(self, y): oldvals = super(Set, self).copy()", "k = self._get_alias(k) return TypeBase.TransferGlobalValidator(self, self._dict[k]) except KeyError: if hasattr(self._dict,", "of dict.has_key def _has_key(self, k): return self._dict.has_key(self._get_alias(k)) # Override of", "class Tuple(TypeBase, tuple): def __init__(self, *args): # Funny, we need", "'%s(...)' method conflicts with data field '%s', use '_%s(...)' to", "= sys.exc_info() try: if wasset: super(Dict, self).__setitem__(k, oldval) else: del(self[k])", "= self._dict.popitem() try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "def __iadd__(self, y): n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for", "is not None: self.data._enable_global_validation(self.oldstate) self.oldstate = None # Always re-raise", "print(\"das.types.Dict.clear: Failed to recover dict data (%s)\" % e) raise", "self def __mul__(self, n): rv = self[:] rv.__imul__(n) return rv", "% (type(self).__name__, k, k, k) st = self._get_schema_type() if st", "filter(lambda x: x in self, self._get_schema_type().ordered_keys()) def _itervalues(self): for v", "super(Sequence, self).__delitem__(i) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self.__or__(y) def __ixor__(self, y): oldvals = super(Set, self).copy() super(Set, self).__ixor__(set([self._adapt_value(x,", "def difference(self, rhs): return self.__sub__(rhs) def union(self, rhs): return self.__or__(rhs)", "[x for x in self.itervalues()] def iteritems(self): for k, v", "= self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) self._gvalidate() except: ec,", "# run self validation first (container validation) 
st._validate_self(self) if hasattr(self,", "(%s)\" % e) raise ec, ei, tb def extend(self, y):", "of the same name prefixed by '_' in current class", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: super(Set, self).add(item)", "except: ec, ei, tb = sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self),", "self.__dict__[\"_validate_globally_cb\"] = None self.__dict__[\"_global_validation_enabled\"] = True def _wrap(self, rhs): st", "> 1: raise Exception(\"update expected at most 1 arguments, got", "e: print(\"das.types.Dict.popitem: Failed to recover dict data (%s)\" % e)", "= sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception, e:", "ei, tb # Override of dict.clear def _clear(self): items =", "to recover sequence data (%s)\" % e) raise ec, ei,", "k): return self._dict.has_key(self._get_alias(k)) # Override of dict.pop def _pop(self, k,", "ii+len(newvals), oldvals) except Exception, e: print(\"das.types.Sequence.__setslice__: Failed to recover sequence", "def __setattr__(self, k, v): # Special case for __class__ member", "k2 = '_' + k if hasattr(self, k2): #print(\"Forward '%s'", "self._get_schema_type() if st is not None and st.has_key(k): aliasname =", "isinstance(st[k], das.schematypes.Deprecated): # message = (\"[das] Field %s is deprecated,", "value, key=None, index=None): return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index) def _validate(self,", "'_%s(...)' to call it instead\" % (type(self).__name__, k, k, k)", "ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is a reserved name\"", "self).insert(ii, item) except Exception, e: print(\"das.types.Sequence.__delitem__: Failed to recover sequence", "KeyError: if hasattr(self._dict, k): # Look for an override method", "def __init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self, i, n=None,", "in 
kwargs.iteritems(): k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k)", "def _setdefault(self, *args): nargs = len(args) if nargs > 2:", "except: ec, ei, tb = sys.exc_info() try: if wasset: super(Dict,", "e) raise ec, ei, tb def extend(self, y): newvals =", "def __xor__(self, y): rv = self.copy() rv ^= y return", "dict.popitem def _popitem(self): k, v = self._dict.popitem() try: self._gvalidate() except:", "def __init__(self, data): super(GlobalValidationDisabled, self).__init__() self.data = data self.oldstate =", "self.oldstate = None def __enter__(self): try: self.oldstate = self.data._is_global_validation_enabled() self.data._enable_global_validation(False)", "self._dict[k] = oldval else: del(self._dict[k]) except Exception, e: print(\"das.types.Struct.__setitem__: Failed", "= None # Always re-raise exception return False class TypeBase(object):", "self._adapt_key(k) if k in self: if not k in oldvals:", "def __setitem__(self, i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i)) self._gvalidate() def", "def _get_schema_type(self): return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type", "ec, ei, tb def copy(self): return self._wrap(self) def add(self, e):", "def __rxor__(self, y): rv = self.copy() rv ^= y return", "self).remove(ae) except Exception, e: print(\"das.types.Set.add: Failed to recover set data", "ec, ei, tb return rv def remove(self, y): idx =", "set): def __init__(self, args): TypeBase.__init__(self) set.__init__(self, args) def __iand__(self, y):", "% e) raise ec, ei, tb def copy(self): return self._wrap(self)", "__exit__(self, type, value, traceback): if self.oldstate is not None: self.data._enable_global_validation(self.oldstate)", "a0 = args[0] if hasattr(a0, \"keys\"): for k in a0.keys():", "# return super(Sequence, self).__contains__(_v) # except: # return False def", "data (%s)\" % e) raise ec, ei, tb def 
extend(self,", "actually unchanged # -> no need to check if _k", "we need to declare *args here, but at the time", "= sys.exc_info() try: super(Set, self).remove(ae) except Exception, e: print(\"das.types.Set.add: Failed", "y): n = len(self) super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x", "k) else: #raise AttributeError(\"'Struct' has no attribute '%s' (dict %s)\"", "class VersionError(Exception): def __init__(self, msg=None, current_version=None, required_version=None): fullmsg = \"ersion", "would fail) try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delitem__:", "y): raise das.ValidationError(\"Expected a tuple of size %d, got %d\"", "def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on class Tuple(TypeBase, tuple): def", "ec, ei, tb = sys.exc_info() try: super(Set, self).remove(ae) except Exception,", "clear(self): oldvals = super(Set, self).copy() super(Set, self).clear() try: self._gvalidate() except:", "rv = self[:] rv.__imul__(n) return rv def __rmul__(self, n): return", "e: print(\"das.types.Struct.__setattr__: Failed to recover struct data (%s)\" % e)", "k in self: if not k in oldvals: oldvals[k] =", "return rv def __setitem__(self, i, y): super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i))", "% e) raise ec, ei, tb return self def __sub__(self,", "def remove(self, y): idx = self.index(y) item = self[idx] super(Sequence,", "_gvalidate(self): st = self._get_schema_type() if st is not None: #", "2 arguments, got %d\" % nargs) if nargs >= 1:", "is not None: gvcb() if hasattr(self, \"_validate_globally\"): try: getattr(self, \"_validate_globally\")()", "data (%s)\" % e) raise ec, ei, tb def __getitem__(self,", "Exception(\"update expected at most 1 arguments, got %d\" % len(args))", "tb # def __contains__(self, k): # try: # _k =", "y): rv = self.copy() rv |= y return rv def", "super(Dict, self).__contains__(_k) # except: # return False def 
setdefault(self, *args):", "self._dict[k] = self._adapt_value(a0[k], key=k) else: for k, v in a0:", "set binding if k == \"__class__\": super(Struct, self).__setattr__(k, v) else:", "def __init__(self, *args, **kwargs): TypeBase.__init__(self) dict.__init__(self, *args, **kwargs) def _adapt_key(self,", "= sys.exc_info() try: super(Set, self).clear() super(Set, self).__ior__(oldvals) except Exception, e:", "tb = sys.exc_info() try: super(Sequence, self).__setslice__(n, len(self), []) except Exception,", "Exception, e: print(\"das.types.Set.update: Failed to recover set data (%s)\" %", "def index(self, y): return super(Sequence, self).index(self._adapt_value(y, index=0)) def insert(self, i,", "super(Set, self).copy() super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))", "self).remove(item) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "def _has_key(self, k): return self._dict.has_key(self._get_alias(k)) # Override of dict.pop def", "__add__(self, y): raise das.ValidationError(\"Expected a tuple of size %d, got", "enumerate(y)])) try: self._gvalidate() except: ec, ei, tb = sys.exc_info() try:", "= self._adapt_value(v, key=k) self._gvalidate() except: ec, ei, tb = sys.exc_info()", "tb = sys.exc_info() try: super(Set, self).remove(ae) except Exception, e: print(\"das.types.Set.add:", "None) super(Dict, self).__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei,", "schema_type.validate(self) self._set_schema_type(schema_type) def _gvalidate(self): st = self._get_schema_type() if st is", "def symmetric_difference(self, rhs): return self.__xor__(rhs) class Dict(TypeBase, dict): def __init__(self,", "out of range\") else: return ii else: return i def", "try: super(Set, self).add(item) except Exception, e: print(\"das.types.Set.pop: Failed to recover", "run self validation first (container validation) st._validate_self(self) if hasattr(self, \"_is_global_validation_enabled\"):", "item) 
except Exception, e: print(\"das.types.Sequence.__delitem__: Failed to recover sequence data", "ec, ei, tb = sys.exc_info() try: self._dict.update(items) except Exception, e:", "if wasset else None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except:", "Maybe because tuple is immutable? super(Tuple, self).__init__() def __add__(self, y):", "del(self[k]) except Exception, e: print(\"das.types.Dict.__setitem__: Failed to recover dict data", "def __iter__(self): return self._dict.__iter__() def __len__(self): return self._dict.__len__() def __str__(self):", "= sys.exc_info() try: super(Set, self).__ior__(oldvals) except Exception, e: print(\"das.types.Set.clear: Failed", "if isinstance(st[k], das.schematypes.Deprecated): # message = (\"[das] Field %s is", "else: remvals.add(k) self[k] = self._adapt_value(v, key=k) try: self._gvalidate() except: ec,", "oldval = (self._dict[k] if wasset else None) self._dict[k] = self._adapt_value(v,", "if wasset else None) self._dict[k] = self._adapt_value(v, key=k) try: self._gvalidate()", "__add__(self, y): rv = self[:] rv.__iadd__(y) return rv def __setitem__(self,", "tb def __delattr__(self, k): k = self._get_alias(k) oldval = self._dict.get(k,", "% (n, msg) das.print_once(msg) self.__dict__[k2] = getattr(self._dict, k) def ordered_keys(self):", "list): def __init__(self, *args): TypeBase.__init__(self) list.__init__(self, *args) def _wrap_index(self, i,", "is not None: n = das.get_schema_type_name(st) if n: msg =", "= self._get_alias(k) oldval = self._dict.get(k, None) retval = self._dict.pop(k, *args)", "raise ec, ei, tb def extend(self, y): newvals = [self._adapt_value(x,", "except: ec, ei, tb = sys.exc_info() try: for k in", "try: if wasset: self._dict[k] = oldval else: del(self._dict[k]) except Exception,", "to check if _k was a valid key super(Dict, self).__setitem__(_k,", "no requirements\" if current_version: fullmsg += \", %s in use\"", "else oth) def __eq__(self, oth): return 
self._dict.__eq__(oth._dict if isinstance(oth, Struct)", "v in a0: k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v,", "st is None else st._validate_self(rhs)) rv._set_schema_type(self._get_schema_type()) return rv def _adapt_value(self,", "n = len(self) super(Sequence, self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except: ec,", "__iand__(self, y): oldvals = super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for", "def _validate(self, schema_type=None): if schema_type is None: schema_type = self._get_schema_type()", "name prefixed by '_' in current class k2 = '_'", "def _is_global_validation_enabled(self): return self.__dict__[\"_global_validation_enabled\"] def _enable_global_validation(self, on): self.__dict__[\"_global_validation_enabled\"] = on", "= oldval except Exception, e: print(\"das.types.Struct.__delitem__: Failed to recover struct", "def copy(self): return self._wrap(self) def add(self, e): ae = self._adapt_value(e,", "__len__(self): return self._dict.__len__() def __str__(self): return self._dict.__str__() def __repr__(self): return", "1: self._check_reserved(args[0]) if nargs == 2: args = (args[0], self._adapt_value(args[1],", "arguments, got %d\" % len(args)) oldvals = self._dict.copy() try: if", "self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except Exception, e: print(\"das.types.Sequence.insert: Failed to recover", "%d\" % len(args)) oldvals = self._dict.copy() try: if len(args) ==", "def __contains__(self, y): # try: # _v = self._adapt_value(y, index=0)", "in self._dict.iteritems(): yield k, TypeBase.TransferGlobalValidator(self, v) def _items(self): return [x", "return self.__dict__[\"_schema_type\"] def _set_schema_type(self, schema_type): self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self):", "would fail) try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delattr__:", "# return False def 
setdefault(self, *args): nargs = len(args) if", "__le__(self, oth): return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth) def", "= das.ValidationError(\"Global Validation Failed (%s)\" % str(ei)) raise ei.__class__, ei,", "k): k2 = \"_\" + k if hasattr(self, k2): #", "don't need to create forwarding attribute (set __getattr__) return if", "raise ec, ei, tb return self def __or__(self, y): rv", "super(Sequence, self).__imul__(n) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "self.__and__(rhs) def symmetric_difference(self, rhs): return self.__xor__(rhs) class Dict(TypeBase, dict): def", "raise ec, ei, tb def pop(self): item = super(Set, self).pop()", "class Struct(TypeBase): def __init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {}", "%s is deprecated, use %s instead\" % (repr(k), repr(aliasname))) #", "to call it instead\" % (type(self).__name__, k, k, k) st", "Exception, e: print(\"das.types.Sequence.__imul__: Failed to recover sequence data (%s)\" %", "== 1: a0 = args[0] if hasattr(a0, \"keys\"): for k", "data (%s)\" % e) raise ec, ei, tb # def", "(len(self), len(self) + len(y))) def __getitem__(self, i): return TypeBase.TransferGlobalValidator(self, super(Tuple,", "for __class__ member that we may want to modify for", "and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if aliasname is not None:", "Struct(TypeBase): def __init__(self, *args, **kwargs): TypeBase.__init__(self) self.__dict__[\"_dict\"] = {} self._update(*args,", "else \"hasn't\")) return self.__getattribute__(k) def __setattr__(self, k, v): # Special", "% e) raise ec, ei, tb return self def __xor__(self,", "k = self._get_alias(k) self._check_reserved(k) self._dict[k] = self._adapt_value(v, key=k) self._gvalidate() except:", "len(args)) oldvals = self._dict.copy() try: if len(args) == 1: a0", "Exception, e: print(\"das.types.Sequence.__iadd__: Failed to recover sequence data (%s)\" %", 
"TypeBase.TransferGlobalValidator(self, item) def __setslice__(self, i, j, y): oldvals = super(Sequence,", "pop(self, k, *args): _k = self._adapt_key(k) _v = super(Dict, self).pop(_k,", "msg + \" v\" + fullmsg else: fullmsg = \"V\"", "if k == \"__class__\": super(Struct, self).__setattr__(k, v) else: k =", "msg = \"[das] %s's '%s(...)' method conflicts with data field", "self def __or__(self, y): rv = self.copy() rv |= y", "__gt__(self, oth): return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth) def", "sys.exc_info() try: if wasset: self._dict[k] = oldval else: del(self._dict[k]) except", "self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except: ec, ei, tb = sys.exc_info()", "ei, tb = sys.exc_info() try: for k in remvals: super(Dict,", "class Dict(TypeBase, dict): def __init__(self, *args, **kwargs): TypeBase.__init__(self) dict.__init__(self, *args,", "self._gvalidate() except: ec, ei, tb = sys.exc_info() try: self._dict[k] =", "# Maybe because tuple is immutable? 
super(Tuple, self).__init__() def __add__(self,", "def __setitem__(self, k, v): k = self._get_alias(k) self._check_reserved(k) wasset =", "except Exception, e: print(\"das.types.Set.__iand__: Failed to recover set data (%s)\"", "Failed (%s)\" % str(ei)) raise ei.__class__, ei, tb def _get_schema_type(self):", "in self._dict.itervalues(): yield TypeBase.TransferGlobalValidator(self, v) def _values(self): return [x for", "reach here only if k was a valid key (otherwise", "\"[das] %s's '%s(...)' method conflicts with data field '%s', use", "except Exception, e: print(\"das.types.Sequence.remove: Failed to recover sequence data (%s)\"", "raise ec, ei, tb def update(self, *args): added = set()", "__or__(self, y): rv = self.copy() rv |= y return rv", "% nargs) if nargs == 2: args = (args[0], self._adapt_value(args[1],", "hasattr(self.__class__, k): raise ReservedNameError(k) elif hasattr(self._dict, k): k2 = \"_\"", "not self._is_global_validation_enabled(): # Skip global validaton return gvcb = self._get_validate_globally_cb()", "%s class '%s'\" % (k, self.__class__.__name__, k2)) return getattr(self, k2)", "= sys.exc_info() try: super(Sequence, self).pop() except Exception, e: print(\"das.types.Sequence.append: Failed", "= set() for y in args: lst = [self._adapt_value(x, index=i)", "y): oldvals = super(Set, self).copy() super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i,", "def pop(self, *args): rv = super(Sequence, self).pop(*args) try: self._gvalidate() except:", "hasattr(self._dict, k): # Look for an override method of the", "class ReservedNameError(Exception): def __init__(self, name): super(ReservedNameError, self).__init__(\"'%s' is a reserved", "except Exception, e: print(\"das.types.Sequence.insert: Failed to recover sequence data (%s)\"", "super(Dict, self).__setitem__(k, v) except Exception, e: print(\"das.types.Dict.update: Failed to recover", "fail) try: self._dict[k] = oldval except Exception, e: print(\"das.types.Struct.__delitem__: 
Failed", "__rxor__(self, y): rv = self.copy() rv ^= y return rv", "return (key if st is None else das.adapt_value(key, schema_type=st.ktype)) def", "#print(\"Forward '%s' to %s class '%s'\" % (k, self.__class__.__name__, k2))", "is not None and st.has_key(k): aliasname = das.schematypes.Alias.Name(st[k]) if aliasname", "def itervalues(self): for v in super(Dict, self).itervalues(): yield TypeBase.TransferGlobalValidator(self, v)", "size %d, got %d\" % (len(self), len(self) + len(y))) def", "k): return self._dict.__contains__(self._get_alias(k)) def __cmp__(self, oth): return self._dict.__cmp__(oth._dict if isinstance(oth,", "if clamp: return 0 else: raise IndexError(\"list index out of", "else oth) def __iter__(self): return self._dict.__iter__() def __len__(self): return self._dict.__len__()", "self).__contains__(_k) # except: # return False def setdefault(self, *args): nargs", "e) raise ec, ei, tb def __getslice__(self, i, j): return", "ei, tb = sys.exc_info() try: super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True)) except", "= len(self) super(Sequence, self).append(self._adapt_value(y, index=n)) try: self._gvalidate() except: ec, ei,", "rv.__imul__(n) return rv def __rmul__(self, n): return self.__mul__(n) def __iadd__(self,", "Tuple(TypeBase, tuple): def __init__(self, *args): # Funny, we need to", "tb = sys.exc_info() try: ii = self._wrap_index(i, clamp=True) super(Sequence, self).__setslice__(ii,", "self).__getslice__(i, j)) def __delslice__(self, i, j): oldvals = super(Sequence, self).__getslice__(i,", "super(Sequence, self).insert(idx, item) except Exception, e: print(\"das.types.Sequence.remove: Failed to recover", "except Exception, e: print(\"das.types.Set.pop: Failed to recover set data (%s)\"", "hasattr(self, k2): # don't need to create forwarding attribute (set", "item in self: continue super(Set, self).add(item) added.add(item) try: self._gvalidate() except:", "recover set data (%s)\" % e) raise ec, ei, tb", "at most 2 
arguments, got %d\" % nargs) if nargs", "schema_type): self.__dict__[\"_schema_type\"] = schema_type def _get_validate_globally_cb(self): return self.__dict__[\"_validate_globally_cb\"] def _set_validate_globally_cb(self,", "arguments, got %d\" % nargs) if nargs == 2: args", "error\" if required_version: fullmsg += \": %s required\" % required_version", "das.schematypes.Deprecated): # message = (\"[das] Field %s is deprecated, use", "Exception, e: print(\"das.types.Set.add: Failed to recover set data (%s)\" %", "a valid key super(Dict, self).__setitem__(_k, _v) except Exception, e: print(\"das.types.Dict.popitem:", "None) self._dict.__setitem__(k, self._adapt_value(v, key=k)) try: self._gvalidate() except: ec, ei, tb", "raise ec, ei, tb return self def __and__(self, y): rv" ]
[ "os.listdir(full_path) for entity in entity_list: ignore_it = False if _ignore", "of size {}\".format(get_size_format(size))) print(\"A detailed report can be found using", "base_path.strip().split('/')[-1] save_filename = examine_name + '.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir", "errors.append(full_path) return edit_dict def track(base_path: str, dir_path: str, output: bool", "'s': size, 'p': base_path, 'time': get_time(stats), 'dirs': info} # write", "'/' + entity, 'time': get_time(stats)} except FileNotFoundError: errors.append(full_path + '/'", ".json file # folder = {'t': 'd', 's': get_size(dir_dict), 'p':", "[FILE/FOLDER]' command \") else: no_of_files += 1 stats = os.stat(base_path)", "1 edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path +", "the main file/folder in the .json file # i :", "examine_name, save_filename examine_name = base_path.strip().split('/')[-1] save_filename = examine_name + '.json'", "if _ignore: get_ignore_list() if os.path.isdir(base_path): info = get_info_dict('') size =", "full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict} if", "file/folder # p : full path of the file/folder #", "None: global examine_name, save_filename examine_name = base_path.strip().split('/')[-1] save_filename = examine_name", "'/' + entity): # ignoring cache temp etc files ignore_it", "+ '/' + entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity] = {'t':", "'d', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info} write", "The dictionary containing info about directory contents # time :", "'s': stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)}", "found using the 'file_tb.py print [FILE/FOLDER]' command \") # pp(info)", "edit_dict = dict() try: entity_list = os.listdir(full_path) for entity in", "examine_name = '' save_filename = '' _base_path = None _ignore", "if not ignore_it: try: stats = os.stat(full_path + '/' +", "track(base_path: str, 
dir_path: str, output: bool = False, ignore: bool", "base_path, 'time': get_time(stats), 'dirs': info} write = {'n': examine_name, 'ts':", "False, ignore: bool = False) -> list: global _base_path, no_of_dirs,", "'p': base_path, 'time': get_time(stats)} write = {'n': examine_name, 'ts': time.time(),", "False if _ignore and to_be_ignored(full_path + '/' + entity): #", "containing info about directory contents # time : edit time", "'/' + entity) if not os.path.islink(full_path + '/' + entity):", "{'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity,", "pp(info) return errors if __name__ == '__main__': track(os.getcwd(), os.getcwd(), output=True)", "get_time(stats), 'dirs': dir_dict} # file = {'t': 'f', 's': stats.st_size,", "+ entity, 'time': get_time(stats)} # info = {'t': 'd', 's':", "except PermissionError: errors.append(full_path) return edit_dict def track(base_path: str, dir_path: str,", "except FileNotFoundError: errors.append(full_path + '/' + entity) except PermissionError: errors.append(full_path)", "for entity in entity_list: ignore_it = False if _ignore and", "using the 'file_tb.py print [FILE/FOLDER]' command \") # pp(info) return", "full_path = full_path[:-1] edit_dict = dict() try: entity_list = os.listdir(full_path)", "ignore_it: try: stats = os.stat(full_path + '/' + entity) if", "# folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path +", "'dirs': dir_dict} if os.path.isfile(full_path + '/' + entity): no_of_files +=", "'f', 's': stats.st_size, 'p': full_path + '/' + entity, 'time':", "= [] def get_save_config(base_path: str) -> None: global examine_name, save_filename", "the file/folder # s : size of the file/folder #", "= {'n': examine_name, 'ts': time.time(), 'i': info} # info =", "# dirs : The dictionary containing info about directory contents", "'p': base_path, 'time': get_time(stats), 'dirs': info} write = {'n': examine_name,", ": directory # f : file # ts : timestamp", "os.path.lexists(constants.save_folder_name): 
execute_bash(\"mkdir \" + constants.save_folder_name) def get_info_dict(sub_path: str) -> dict:", "= False errors = [] def get_save_config(base_path: str) -> None:", "{'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} write =", "'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} write = {'n':", "edit time of the file/folder # s : size of", "file\") print(\"The file is of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report", "# n : name of the main file/folder in the", "'/' + entity, 'time': get_time(stats)} # info = {'t': 'd',", "base_path, 'time': get_time(stats)} # write = {'n': examine_name, 'ts': time.time(),", "os.path.isdir(base_path): info = get_info_dict('') size = get_size(info) no_of_dirs += 1", "'dirs': info} # write = {'n': examine_name, 'ts': time.time(), 'i':", "None _ignore = False errors = [] def get_save_config(base_path: str)", "timestamp # dirs : The dictionary containing info about directory", "{'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}", "True if not ignore_it: try: stats = os.stat(full_path + '/'", "d or f # d : directory # f :", "write = {'n': examine_name, 'ts': time.time(), 'i': info} # info", "size of the file/folder # p : full path of", "entity, 'time': get_time(stats), 'dirs': dir_dict} if os.path.isfile(full_path + '/' +", "'/' + entity): if os.path.isdir(full_path + '/' + entity): no_of_dirs", "try: entity_list = os.listdir(full_path) for entity in entity_list: ignore_it =", "str, output: bool = False, ignore: bool = False) ->", "'ts': time.time(), 'i': info} write_to_json_file(write, constants.save_folder_name + \"/\" + save_filename)", "\" + constants.save_folder_name) def get_info_dict(sub_path: str) -> dict: global no_of_files,", "is of size {}\".format(get_size_format(size))) print(\"A detailed report can be found", "info = get_info_dict('') size = get_size(info) no_of_dirs += 1 stats", ": full path of the file/folder # n : name", 
"entity) except PermissionError: errors.append(full_path) return edit_dict def track(base_path: str, dir_path:", "= '' save_filename = '' _base_path = None _ignore =", "+ entity): if os.path.isdir(full_path + '/' + entity): no_of_dirs +=", "size {}\".format(get_size_format(size))) print(\"A detailed report can be found using the", "size # t : type - d or f #", "the contents in the .json file # folder = {'t':", "'time': get_time(stats)} # write = {'n': examine_name, 'ts': time.time(), 'i':", "get_size(info) no_of_dirs += 1 stats = os.stat(base_path) info = {'t':", "{}\".format(get_size_format(size))) print(\"A detailed report can be found using the 'file_tb.py", "edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path + '/'", "not ignore_it: try: stats = os.stat(full_path + '/' + entity)", "'/' + entity) except PermissionError: errors.append(full_path) return edit_dict def track(base_path:", "info} # info = {'t': 'f', 's': stats.st_size, 'p': base_path,", ".json file size # t : type - d or", "type - d or f # d : directory #", "+ '/' + entity): if os.path.isdir(full_path + '/' + entity):", "full_path + '/' + entity, 'time': get_time(stats)} except FileNotFoundError: errors.append(full_path", "+ constants.save_folder_name) def get_info_dict(sub_path: str) -> dict: global no_of_files, no_of_dirs,", "= os.stat(base_path) info = {'t': 'f', 's': stats.st_size, 'p': base_path,", "to_be_ignored(full_path + '/' + entity): # ignoring cache temp etc", "get_ignore_list() if os.path.isdir(base_path): info = get_info_dict('') size = get_size(info) no_of_dirs", "+ entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd', 's':", "dict() try: entity_list = os.listdir(full_path) for entity in entity_list: ignore_it", "try: stats = os.stat(full_path + '/' + entity) if not", "False) -> list: global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors", "f : file # ts : timestamp # dirs :", "- d or f # d : directory # f", "global _base_path, 
no_of_dirs, no_of_files, save_filename, _ignore, errors no_of_dirs = 0", "dir_dict} if os.path.isfile(full_path + '/' + entity): no_of_files += 1", "_ignore: get_ignore_list() if os.path.isdir(base_path): info = get_info_dict('') size = get_size(info)", "size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report can be found using the", "_base_path = base_path _ignore = ignore get_save_config(base_path) if _ignore: get_ignore_list()", "the .json file # i : info about the contents", "= {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} #", "+ entity): no_of_files += 1 edit_dict[entity] = {'t': 'f', 's':", "= None _ignore = False errors = [] def get_save_config(base_path:", "os.path.islink(full_path + '/' + entity): if os.path.isdir(full_path + '/' +", "'p': full_path + '/' + entity, 'time': get_time(stats)} except FileNotFoundError:", "= {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' +", "= dict() try: entity_list = os.listdir(full_path) for entity in entity_list:", "= os.listdir(full_path) for entity in entity_list: ignore_it = False if", "_ignore, errors no_of_dirs = 0 no_of_files = 0 print(\"Tracking...\") _base_path", "# s : size of the file/folder # p :", "time # short-forms are used, so as to reduce the", "= False if _ignore and to_be_ignored(full_path + '/' + entity):", ": size of the file/folder # p : full path", "= '' _base_path = None _ignore = False errors =", "if _ignore and to_be_ignored(full_path + '/' + entity): # ignoring", "+ '.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" + constants.save_folder_name) def", "execute_bash(\"mkdir \" + constants.save_folder_name) def get_info_dict(sub_path: str) -> dict: global", "# info = {'t': 'd', 's': size, 'p': base_path, 'time':", "_base_path + '/' + sub_path full_path = full_path.strip() if full_path.endswith('/'):", "size = get_size(info) no_of_dirs += 1 stats = os.stat(base_path) info", "'s': stats.st_size, 'p': base_path, 
'time': get_time(stats)} write = {'n': examine_name,", "to reduce the .json file size # t : type", "output: print(\"Successfully analysed the file\") print(\"The file is of size", "info} no_of_files = 0 no_of_dirs = 0 examine_name = ''", "= full_path.strip() if full_path.endswith('/'): full_path = full_path[:-1] edit_dict = dict()", "file/folder # s : size of the file/folder # p", "# write = {'n': examine_name, 'ts': time.time(), 'i': info} no_of_files", "i : info about the contents in the .json file", "'.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" + constants.save_folder_name) def get_info_dict(sub_path:", "= examine_name + '.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" +", "get_time(stats)} # write = {'n': examine_name, 'ts': time.time(), 'i': info}", "import time # short-forms are used, so as to reduce", "detailed report can be found using the 'file_tb.py print [FILE/FOLDER]'", "-> dict: global no_of_files, no_of_dirs, _base_path, _ignore, errors full_path =", "1 stats = os.stat(base_path) info = {'t': 'f', 's': stats.st_size,", "time.time(), 'i': info} # info = {'t': 'f', 's': stats.st_size,", "+ entity, 'time': get_time(stats), 'dirs': dir_dict} # file = {'t':", "def get_save_config(base_path: str) -> None: global examine_name, save_filename examine_name =", "= get_info_dict('') size = get_size(info) no_of_dirs += 1 stats =", "+ \"/\" + save_filename) if output: print(\"Successfully analysed the folder", "file # i : info about the contents in the", "no_of_dirs += 1 new_sub_path = sub_path + '/' + entity", "0 examine_name = '' save_filename = '' _base_path = None", "ignore_it = False if _ignore and to_be_ignored(full_path + '/' +", "+ '/' + entity, 'time': get_time(stats), 'dirs': dir_dict} # file", "the .json file # folder = {'t': 'd', 's': get_size(dir_dict),", "+ base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The 
directory", "stats = os.stat(base_path) info = {'t': 'd', 's': size, 'p':", "global examine_name, save_filename examine_name = base_path.strip().split('/')[-1] save_filename = examine_name +", ": name of the main file/folder in the .json file", "str) -> None: global examine_name, save_filename examine_name = base_path.strip().split('/')[-1] save_filename", "ignore get_save_config(base_path) if _ignore: get_ignore_list() if os.path.isdir(base_path): info = get_info_dict('')", "no_of_files += 1 edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p':", "_base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors no_of_dirs = 0 no_of_files", "-> None: global examine_name, save_filename examine_name = base_path.strip().split('/')[-1] save_filename =", "+ '/' + entity): # ignoring cache temp etc files", "os.stat(full_path + '/' + entity) if not os.path.islink(full_path + '/'", "not os.path.islink(full_path + '/' + entity): if os.path.isdir(full_path + '/'", "path of the file/folder # n : name of the", ": file # ts : timestamp # dirs : The", "name of the main file/folder in the .json file #", ": timestamp # dirs : The dictionary containing info about", "\"/\" + save_filename) if output: print(\"Successfully analysed the folder \"", "stats = os.stat(full_path + '/' + entity) if not os.path.islink(full_path", "sub_path full_path = full_path.strip() if full_path.endswith('/'): full_path = full_path[:-1] edit_dict", "size, 'p': base_path, 'time': get_time(stats), 'dirs': info} write = {'n':", "ignore: bool = False) -> list: global _base_path, no_of_dirs, no_of_files,", "if not os.path.islink(full_path + '/' + entity): if os.path.isdir(full_path +", "sub_path + '/' + entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity] =", "1 new_sub_path = sub_path + '/' + entity dir_dict =", "f # d : directory # f : file #", "used, so as to reduce the .json file size #", "time of the file/folder # s : size of the", "save_filename) if output: print(\"Successfully analysed 
the file\") print(\"The file is", "help import * import time # short-forms are used, so", "'dirs': info} write = {'n': examine_name, 'ts': time.time(), 'i': info}", "dir_path: str, output: bool = False, ignore: bool = False)", "'/' + sub_path full_path = full_path.strip() if full_path.endswith('/'): full_path =", "stats.st_size, 'p': base_path, 'time': get_time(stats)} write = {'n': examine_name, 'ts':", "etc files ignore_it = True if not ignore_it: try: stats", "get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path +", "the 'file_tb.py print [FILE/FOLDER]' command \") else: no_of_files += 1", "file/folder in the .json file # i : info about", "output: bool = False, ignore: bool = False) -> list:", "= True if not ignore_it: try: stats = os.stat(full_path +", "examine_name + '.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" + constants.save_folder_name)", ": edit time of the file/folder # s : size", "entity): # ignoring cache temp etc files ignore_it = True", "= get_size(info) no_of_dirs += 1 stats = os.stat(base_path) info =", "d : directory # f : file # ts :", "write = {'n': examine_name, 'ts': time.time(), 'i': info} no_of_files =", "stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)} #", "_ignore = ignore get_save_config(base_path) if _ignore: get_ignore_list() if os.path.isdir(base_path): info", "# t : type - d or f # d", "info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats),", "{'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} # write", "base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The directory is", "'file_tb.py print [FILE/FOLDER]' command \") else: no_of_files += 1 stats", "no_of_files, save_filename, _ignore, errors no_of_dirs = 0 no_of_files = 0", "directory is of size {}\".format(get_size_format(size))) print(\"A detailed report can be", "entity_list = 
os.listdir(full_path) for entity in entity_list: ignore_it = False", "full_path.endswith('/'): full_path = full_path[:-1] edit_dict = dict() try: entity_list =", "and to_be_ignored(full_path + '/' + entity): # ignoring cache temp", "examine_name = base_path.strip().split('/')[-1] save_filename = examine_name + '.json' if not", "'i': info} # info = {'t': 'f', 's': stats.st_size, 'p':", "PermissionError: errors.append(full_path) return edit_dict def track(base_path: str, dir_path: str, output:", "'time': get_time(stats), 'dirs': info} write = {'n': examine_name, 'ts': time.time(),", "{'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity,", "short-forms are used, so as to reduce the .json file", "write_to_json_file(write, constants.save_folder_name + \"/\" + save_filename) if output: print(\"Successfully analysed", "if output: print(\"Successfully analysed the folder \" + base_path) print(\"Found", "print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The directory is of", "{}\".format(get_size_format(stats.st_size))) print(\"A detailed report can be found using the 'file_tb.py", "of the main file/folder in the .json file # i", "0 print(\"Tracking...\") _base_path = base_path _ignore = ignore get_save_config(base_path) if", "output: print(\"Successfully analysed the folder \" + base_path) print(\"Found {}", "file = {'t': 'f', 's': stats.st_size, 'p': full_path + '/'", "are used, so as to reduce the .json file size", "bool = False) -> list: global _base_path, no_of_dirs, no_of_files, save_filename,", "contents # time : edit time of the file/folder #", "+ '/' + entity): no_of_files += 1 edit_dict[entity] = {'t':", "contents in the .json file # folder = {'t': 'd',", "{} file(s)\".format(no_of_files)) print(\"The directory is of size {}\".format(get_size_format(size))) print(\"A detailed", "_ignore and to_be_ignored(full_path + '/' + entity): # ignoring cache", "# ts : timestamp # dirs : The dictionary 
containing", "[FILE/FOLDER]' command \") # pp(info) return errors if __name__ ==", "from help import * import time # short-forms are used,", "# f : file # ts : timestamp # dirs", "\"/\" + save_filename) if output: print(\"Successfully analysed the file\") print(\"The", "edit_dict def track(base_path: str, dir_path: str, output: bool = False,", "-> list: global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors no_of_dirs", "the file/folder # p : full path of the file/folder", "get_time(stats), 'dirs': info} write = {'n': examine_name, 'ts': time.time(), 'i':", "'d', 's': get_size(dir_dict), 'p': full_path + '/' + entity, 'time':", "def track(base_path: str, dir_path: str, output: bool = False, ignore:", "full_path = _base_path + '/' + sub_path full_path = full_path.strip()", "not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" + constants.save_folder_name) def get_info_dict(sub_path: str) ->", "entity, 'time': get_time(stats), 'dirs': dir_dict} # file = {'t': 'f',", "# p : full path of the file/folder # n", "save_filename = '' _base_path = None _ignore = False errors", "= full_path[:-1] edit_dict = dict() try: entity_list = os.listdir(full_path) for", "+ entity) except PermissionError: errors.append(full_path) return edit_dict def track(base_path: str,", "'i': info} write_to_json_file(write, constants.save_folder_name + \"/\" + save_filename) if output:", "+ entity): # ignoring cache temp etc files ignore_it =", "print(\"A detailed report can be found using the 'file_tb.py print", "write = {'n': examine_name, 'ts': time.time(), 'i': info} write_to_json_file(write, constants.save_folder_name", "'/' + entity): no_of_files += 1 edit_dict[entity] = {'t': 'f',", "'time': get_time(stats)} except FileNotFoundError: errors.append(full_path + '/' + entity) except", "str, dir_path: str, output: bool = False, ignore: bool =", "+ entity): no_of_dirs += 1 new_sub_path = sub_path + '/'", "about the contents in the .json file # folder =", 
"print(\"Successfully analysed the file\") print(\"The file is of size {}\".format(get_size_format(stats.st_size)))", "* import time # short-forms are used, so as to", "p : full path of the file/folder # n :", "is of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report can be found", "the .json file size # t : type - d", "examine_name, 'ts': time.time(), 'i': info} # info = {'t': 'f',", "os.stat(base_path) info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time':", "time.time(), 'i': info} no_of_files = 0 no_of_dirs = 0 examine_name", "of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report can be found using", "0 no_of_files = 0 print(\"Tracking...\") _base_path = base_path _ignore =", "get_time(stats), 'dirs': dir_dict} if os.path.isfile(full_path + '/' + entity): no_of_files", "save_filename, _ignore, errors no_of_dirs = 0 no_of_files = 0 print(\"Tracking...\")", "if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \" + constants.save_folder_name) def get_info_dict(sub_path: str)", "get_time(stats)} # info = {'t': 'd', 's': size, 'p': base_path,", "+ '/' + entity, 'time': get_time(stats), 'dirs': dir_dict} if os.path.isfile(full_path", "n : name of the main file/folder in the .json", "info} write_to_json_file(write, constants.save_folder_name + \"/\" + save_filename) if output: print(\"Successfully", "else: no_of_files += 1 stats = os.stat(base_path) info = {'t':", "can be found using the 'file_tb.py print [FILE/FOLDER]' command \")", "no_of_dirs = 0 no_of_files = 0 print(\"Tracking...\") _base_path = base_path", "'i': info} no_of_files = 0 no_of_dirs = 0 examine_name =", "'time': get_time(stats), 'dirs': dir_dict} if os.path.isfile(full_path + '/' + entity):", "+ '/' + entity): no_of_dirs += 1 new_sub_path = sub_path", "list: global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors no_of_dirs =", "# ignoring cache temp etc files ignore_it = True if", "no_of_files = 0 
print(\"Tracking...\") _base_path = base_path _ignore = ignore", ": info about the contents in the .json file #", "entity): if os.path.isdir(full_path + '/' + entity): no_of_dirs += 1", "= 0 no_of_dirs = 0 examine_name = '' save_filename =", "get_time(stats)} write = {'n': examine_name, 'ts': time.time(), 'i': info} write_to_json_file(write,", "{'n': examine_name, 'ts': time.time(), 'i': info} no_of_files = 0 no_of_dirs", "constants.save_folder_name + \"/\" + save_filename) if output: print(\"Successfully analysed the", "time : edit time of the file/folder # s :", "+= 1 new_sub_path = sub_path + '/' + entity dir_dict", "def get_info_dict(sub_path: str) -> dict: global no_of_files, no_of_dirs, _base_path, _ignore,", "+ '/' + entity, 'time': get_time(stats)} except FileNotFoundError: errors.append(full_path +", "print [FILE/FOLDER]' command \") # pp(info) return errors if __name__", "'ts': time.time(), 'i': info} # info = {'t': 'f', 's':", "dir_dict = get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p':", "edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/'", "'/' + entity, 'time': get_time(stats), 'dirs': dir_dict} # file =", "False errors = [] def get_save_config(base_path: str) -> None: global", ": The dictionary containing info about directory contents # time", "'/' + entity, 'time': get_time(stats), 'dirs': dir_dict} if os.path.isfile(full_path +", "errors full_path = _base_path + '/' + sub_path full_path =", "# info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time':", "'p': full_path + '/' + entity, 'time': get_time(stats)} # info", "examine_name, 'ts': time.time(), 'i': info} write_to_json_file(write, constants.save_folder_name + \"/\" +", "or f # d : directory # f : file", "'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} # write =", "info} # write = {'n': examine_name, 'ts': time.time(), 'i': info}", "= {'n': examine_name, 'ts': time.time(), 'i': info} 
write_to_json_file(write, constants.save_folder_name +", "cache temp etc files ignore_it = True if not ignore_it:", "'p': base_path, 'time': get_time(stats)} # write = {'n': examine_name, 'ts':", "entity) if not os.path.islink(full_path + '/' + entity): if os.path.isdir(full_path", "+ \"/\" + save_filename) if output: print(\"Successfully analysed the file\")", "= ignore get_save_config(base_path) if _ignore: get_ignore_list() if os.path.isdir(base_path): info =", "0 no_of_dirs = 0 examine_name = '' save_filename = ''", "if os.path.isfile(full_path + '/' + entity): no_of_files += 1 edit_dict[entity]", "t : type - d or f # d :", "# pp(info) return errors if __name__ == '__main__': track(os.getcwd(), os.getcwd(),", "stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)} except", "of the file/folder # p : full path of the", "= False) -> list: global _base_path, no_of_dirs, no_of_files, save_filename, _ignore,", "_base_path = None _ignore = False errors = [] def", "main file/folder in the .json file # i : info", "print(\"The file is of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report can", "+ '/' + entity, 'time': get_time(stats)} # info = {'t':", "+ '/' + entity) except PermissionError: errors.append(full_path) return edit_dict def", "= sub_path + '/' + entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity]", "# short-forms are used, so as to reduce the .json", "# i : info about the contents in the .json", "file # folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path", "full_path + '/' + entity, 'time': get_time(stats)} # info =", "base_path, 'time': get_time(stats), 'dirs': info} # write = {'n': examine_name,", "errors = [] def get_save_config(base_path: str) -> None: global examine_name,", "#! 
/usr/bin/python3 from help import * import time # short-forms", "so as to reduce the .json file size # t", "full path of the file/folder # n : name of", "entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict),", "files ignore_it = True if not ignore_it: try: stats =", "# file = {'t': 'f', 's': stats.st_size, 'p': full_path +", "time.time(), 'i': info} write_to_json_file(write, constants.save_folder_name + \"/\" + save_filename) if", "entity_list: ignore_it = False if _ignore and to_be_ignored(full_path + '/'", "[] def get_save_config(base_path: str) -> None: global examine_name, save_filename examine_name", "print(\"Successfully analysed the folder \" + base_path) print(\"Found {} folder(s)\".format(no_of_dirs))", "_ignore = False errors = [] def get_save_config(base_path: str) ->", "+= 1 stats = os.stat(base_path) info = {'t': 'd', 's':", "s : size of the file/folder # p : full", "dir_dict} # file = {'t': 'f', 's': stats.st_size, 'p': full_path", "FileNotFoundError: errors.append(full_path + '/' + entity) except PermissionError: errors.append(full_path) return", "1 stats = os.stat(base_path) info = {'t': 'd', 's': size,", "report can be found using the 'file_tb.py print [FILE/FOLDER]' command", "'time': get_time(stats), 'dirs': dir_dict} # file = {'t': 'f', 's':", "'/' + entity): no_of_dirs += 1 new_sub_path = sub_path +", "the file/folder # n : name of the main file/folder", "_ignore, errors full_path = _base_path + '/' + sub_path full_path", "the file\") print(\"The file is of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed", "'p': base_path, 'time': get_time(stats), 'dirs': info} # write = {'n':", "'time': get_time(stats)} write = {'n': examine_name, 'ts': time.time(), 'i': info}", "os.stat(base_path) info = {'t': 'd', 's': size, 'p': base_path, 'time':", "'s': size, 'p': base_path, 'time': get_time(stats), 'dirs': info} write =", "file size # t : type - d or f", "save_filename) if output: 
print(\"Successfully analysed the folder \" + base_path)", "= base_path.strip().split('/')[-1] save_filename = examine_name + '.json' if not os.path.lexists(constants.save_folder_name):", "get_save_config(base_path) if _ignore: get_ignore_list() if os.path.isdir(base_path): info = get_info_dict('') size", "get_time(stats)} except FileNotFoundError: errors.append(full_path + '/' + entity) except PermissionError:", "\") else: no_of_files += 1 stats = os.stat(base_path) info =", "entity): no_of_dirs += 1 new_sub_path = sub_path + '/' +", "stats.st_size, 'p': base_path, 'time': get_time(stats)} # write = {'n': examine_name,", "+ entity, 'time': get_time(stats)} except FileNotFoundError: errors.append(full_path + '/' +", "= {'n': examine_name, 'ts': time.time(), 'i': info} no_of_files = 0", "no_of_dirs, _base_path, _ignore, errors full_path = _base_path + '/' +", "'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict}", "{'n': examine_name, 'ts': time.time(), 'i': info} write_to_json_file(write, constants.save_folder_name + \"/\"", "get_time(stats), 'dirs': info} # write = {'n': examine_name, 'ts': time.time(),", "ignore_it = True if not ignore_it: try: stats = os.stat(full_path", "+ entity) if not os.path.islink(full_path + '/' + entity): if", "in entity_list: ignore_it = False if _ignore and to_be_ignored(full_path +", "\") # pp(info) return errors if __name__ == '__main__': track(os.getcwd(),", "'file_tb.py print [FILE/FOLDER]' command \") # pp(info) return errors if", "_base_path, _ignore, errors full_path = _base_path + '/' + sub_path", "no_of_dirs, no_of_files, save_filename, _ignore, errors no_of_dirs = 0 no_of_files =", "= 0 examine_name = '' save_filename = '' _base_path =", "save_filename = examine_name + '.json' if not os.path.lexists(constants.save_folder_name): execute_bash(\"mkdir \"", "os.path.isdir(full_path + '/' + entity): no_of_dirs += 1 new_sub_path =", "reduce the .json file size # t : type -", "+ entity, 'time': get_time(stats), 
'dirs': dir_dict} if os.path.isfile(full_path + '/'", "bool = False, ignore: bool = False) -> list: global", "'/' + entity dir_dict = get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd',", "'s': get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats),", "found using the 'file_tb.py print [FILE/FOLDER]' command \") else: no_of_files", "+ save_filename) if output: print(\"Successfully analysed the file\") print(\"The file", "no_of_files = 0 no_of_dirs = 0 examine_name = '' save_filename", "entity, 'time': get_time(stats)} # info = {'t': 'd', 's': size,", "size, 'p': base_path, 'time': get_time(stats), 'dirs': info} # write =", "entity, 'time': get_time(stats)} except FileNotFoundError: errors.append(full_path + '/' + entity)", "full_path[:-1] edit_dict = dict() try: entity_list = os.listdir(full_path) for entity", "get_info_dict(sub_path: str) -> dict: global no_of_files, no_of_dirs, _base_path, _ignore, errors", "'d', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info} #", "+ '/' + entity) if not os.path.islink(full_path + '/' +", "# time : edit time of the file/folder # s", "errors.append(full_path + '/' + entity) except PermissionError: errors.append(full_path) return edit_dict", "dirs : The dictionary containing info about directory contents #", "dictionary containing info about directory contents # time : edit", "'ts': time.time(), 'i': info} no_of_files = 0 no_of_dirs = 0", "print(\"The directory is of size {}\".format(get_size_format(size))) print(\"A detailed report can", "# d : directory # f : file # ts", "command \") # pp(info) return errors if __name__ == '__main__':", "+= 1 edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path", "'' save_filename = '' _base_path = None _ignore = False", "entity in entity_list: ignore_it = False if _ignore and to_be_ignored(full_path", "print(\"Found {} file(s)\".format(no_of_files)) print(\"The directory is of size {}\".format(get_size_format(size))) print(\"A", "str) -> 
dict: global no_of_files, no_of_dirs, _base_path, _ignore, errors full_path", "+ '/' + sub_path full_path = full_path.strip() if full_path.endswith('/'): full_path", "return edit_dict def track(base_path: str, dir_path: str, output: bool =", "# write = {'n': examine_name, 'ts': time.time(), 'i': info} #", "no_of_files, no_of_dirs, _base_path, _ignore, errors full_path = _base_path + '/'", "= 0 no_of_files = 0 print(\"Tracking...\") _base_path = base_path _ignore", "folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The directory is of size {}\".format(get_size_format(size)))", "get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs':", "temp etc files ignore_it = True if not ignore_it: try:", "save_filename examine_name = base_path.strip().split('/')[-1] save_filename = examine_name + '.json' if", "if output: print(\"Successfully analysed the file\") print(\"The file is of", "be found using the 'file_tb.py print [FILE/FOLDER]' command \") #", "in the .json file # folder = {'t': 'd', 's':", "errors no_of_dirs = 0 no_of_files = 0 print(\"Tracking...\") _base_path =", "no_of_files += 1 stats = os.stat(base_path) info = {'t': 'f',", "file is of size {}\".format(get_size_format(stats.st_size))) print(\"A detailed report can be", ": type - d or f # d : directory", "about directory contents # time : edit time of the", "'' _base_path = None _ignore = False errors = []", "file(s)\".format(no_of_files)) print(\"The directory is of size {}\".format(get_size_format(size))) print(\"A detailed report", "os.path.isfile(full_path + '/' + entity): no_of_files += 1 edit_dict[entity] =", "command \") else: no_of_files += 1 stats = os.stat(base_path) info", "= {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs':", "of the file/folder # s : size of the file/folder", "be found using the 'file_tb.py print [FILE/FOLDER]' command \") else:", "= {'t': 'f', 's': stats.st_size, 'p': full_path + '/' +", "= 
_base_path + '/' + sub_path full_path = full_path.strip() if", "entity): no_of_files += 1 edit_dict[entity] = {'t': 'f', 's': stats.st_size,", "= base_path _ignore = ignore get_save_config(base_path) if _ignore: get_ignore_list() if", "+ sub_path full_path = full_path.strip() if full_path.endswith('/'): full_path = full_path[:-1]", "base_path, 'time': get_time(stats)} write = {'n': examine_name, 'ts': time.time(), 'i':", "+ save_filename) if output: print(\"Successfully analysed the folder \" +", "info about the contents in the .json file # folder", "ignoring cache temp etc files ignore_it = True if not", "get_info_dict('') size = get_size(info) no_of_dirs += 1 stats = os.stat(base_path)", "= 0 print(\"Tracking...\") _base_path = base_path _ignore = ignore get_save_config(base_path)", "directory # f : file # ts : timestamp #", "info about directory contents # time : edit time of", "if os.path.isdir(base_path): info = get_info_dict('') size = get_size(info) no_of_dirs +=", "new_sub_path = sub_path + '/' + entity dir_dict = get_info_dict(new_sub_path)", "as to reduce the .json file size # t :", "dict: global no_of_files, no_of_dirs, _base_path, _ignore, errors full_path = _base_path", "file # ts : timestamp # dirs : The dictionary", "\" + base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The", "folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/'", "no_of_dirs += 1 stats = os.stat(base_path) info = {'t': 'd',", "global no_of_files, no_of_dirs, _base_path, _ignore, errors full_path = _base_path +", "constants.save_folder_name) def get_info_dict(sub_path: str) -> dict: global no_of_files, no_of_dirs, _base_path,", "= False, ignore: bool = False) -> list: global _base_path,", "= get_info_dict(new_sub_path) edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path", "directory contents # time : edit time of the file/folder", "using the 'file_tb.py print [FILE/FOLDER]' command \") 
else: no_of_files +=", ".json file # i : info about the contents in", "info} write = {'n': examine_name, 'ts': time.time(), 'i': info} write_to_json_file(write,", "/usr/bin/python3 from help import * import time # short-forms are", "get_save_config(base_path: str) -> None: global examine_name, save_filename examine_name = base_path.strip().split('/')[-1]", "of the file/folder # n : name of the main", "full_path = full_path.strip() if full_path.endswith('/'): full_path = full_path[:-1] edit_dict =", "in the .json file # i : info about the", "print [FILE/FOLDER]' command \") else: no_of_files += 1 stats =", "import * import time # short-forms are used, so as", "if os.path.isdir(full_path + '/' + entity): no_of_dirs += 1 new_sub_path", "full_path.strip() if full_path.endswith('/'): full_path = full_path[:-1] edit_dict = dict() try:", "'time': get_time(stats), 'dirs': info} # write = {'n': examine_name, 'ts':", "stats = os.stat(base_path) info = {'t': 'f', 's': stats.st_size, 'p':", "ts : timestamp # dirs : The dictionary containing info", "info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}", "'s': stats.st_size, 'p': base_path, 'time': get_time(stats)} # write = {'n':", "analysed the folder \" + base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found", "examine_name, 'ts': time.time(), 'i': info} no_of_files = 0 no_of_dirs =", "'time': get_time(stats)} # info = {'t': 'd', 's': size, 'p':", "folder \" + base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files))", "full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict} #", "'dirs': dir_dict} # file = {'t': 'f', 's': stats.st_size, 'p':", "= {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)} write", "{'n': examine_name, 'ts': time.time(), 'i': info} # info = {'t':", "= os.stat(full_path + '/' + entity) if not os.path.islink(full_path +", "file/folder # n : name of the main file/folder in", 
"analysed the file\") print(\"The file is of size {}\".format(get_size_format(stats.st_size))) print(\"A", "base_path _ignore = ignore get_save_config(base_path) if _ignore: get_ignore_list() if os.path.isdir(base_path):", "the 'file_tb.py print [FILE/FOLDER]' command \") # pp(info) return errors", "if full_path.endswith('/'): full_path = full_path[:-1] edit_dict = dict() try: entity_list", "+= 1 stats = os.stat(base_path) info = {'t': 'f', 's':", "the folder \" + base_path) print(\"Found {} folder(s)\".format(no_of_dirs)) print(\"Found {}", "{} folder(s)\".format(no_of_dirs)) print(\"Found {} file(s)\".format(no_of_files)) print(\"The directory is of size", "print(\"Tracking...\") _base_path = base_path _ignore = ignore get_save_config(base_path) if _ignore:", "= os.stat(base_path) info = {'t': 'd', 's': size, 'p': base_path,", "no_of_dirs = 0 examine_name = '' save_filename = '' _base_path" ]
[ "return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external definition map", "return continuation(opts) # To have good results from static analyzer", "requested execute sequentially pool = multiprocessing.Pool(1 if args.verbose > 2", "as it was requested. return number_of_bugs if args.status_bugs else exit_code", "govern_analyzer_runs(args) else: # Run build command and analyzer with compiler", "compiler_language from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \\ ClangErrorException from", "the build process. \"\"\" return len(args) and not re.search(r'configure|autogen', args[0])", "generates those. \"\"\" result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if", "flags into a separate basket if arg == '-arch': result['arch_list'].append(next(args))", "we don't want to see. elif re.match(r'^-W.+', arg) and not", "https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\"", "CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all lines of input", "Run build command and analyzer with compiler wrappers. environment =", "\"\"\" ctu_config = get_ctu_config_from_args(args) # If we do a CTU", "libscanbuild.shell import decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC =", "ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory'])", "+ [opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' +", "except Exception: logging.error(\"Problem occurred during analysis.\", exc_info=1) return None @require(['clang',", "= { 'flags': [], # the filtered compiler flags 'arch_list':", "compiler wrappers. 
environment = setup_environment(args) exit_code = run_build(args.build, env=environment) #", "@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess", "parameters) current = run(parameters) # display error message from the", "build, -- Analyze: run the analyzer against the captured commands,", "CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) #", "'args.excludes' are absolute paths. filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^'", "This method generates those. \"\"\" result = [] if args.store_model:", "os.rmdir(name) def analyzer_params(args): \"\"\" A group of command line arguments", "'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\" It assembles the analysis command", "language not known') return None elif language not in accepted:", "for triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir", "message from the static analyzer if current is not None:", "'.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i') def destination(): \"\"\" Creates", "the 'require' decorator. It's like an 'assert' to check the", "major report is the preprocessor output. 
The output filename generated", "'w') as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ') +", "need to set up everything for the # wrappers, because", "'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation database') with open(args.cdb,", "os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: # In", "current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): \"\"\" Governs multiple runs", "ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate external definition map", "from compilation database 'file', # entry from compilation database 'clang',", "mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name, ast_file)) triple_arches", "the parent directory of the output directory. keep -- a", "not exclude( cmd['file'], cmd['directory'])) # when verbose output requested execute", "line arguments can mapped to command line arguments of the", "implementation just append an undefine macro at the end opts.update({'flags':", "if language is None and compiler is not None: language", "Each method first check that the needed parameters received. (This", "map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only one phase", "'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file',", "But we need to set up everything for the #", "output file name for reports. \"\"\" if opts['output_format'] in {", "args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES':", "logging.debug('analysis, on arch: %s', current) opts.update({'flags': ['-arch', current] + opts['flags']})", "Exceptions. 
# See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0", "mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def", "supported') return None else: logging.debug('analysis, language: %s', language) opts.update({'language': language,", "report directory. \"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir", "if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) +", "ex: result = {'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures', False):", "False): opts.update(result) continuation(opts) return result except ClangErrorException as ex: result", "runs parallel on multiple threads, all compilation units are separately", "Make relative path out of absolute path = path[1:] if", "architectures. \"\"\" disabled = frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if", "language is None: logging.debug('skip analysis, language not known') return None", "we deliberately remove collection data before and # also after", "'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', # generate crash reports or", "at %s/results-merged.sarif.\" keep = True else: if keep: msg =", "= [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \\ [opts['file'], '-o',", "# we don't care about extra warnings, but we should", "re.match(r'^-Wno-.+', arg): pass # and consider everything else as compilation", "is it a compilation? compilation = split_command(execution.cmd) if compilation is", "keep: os.rmdir(name) def analyzer_params(args): \"\"\" A group of command line", "of those is missing. 
\"\"\" def decorator(function): @functools.wraps(function) def wrapper(*args,", "= command if isinstance(command, list) else decode(command) logging.debug(\"Run analyzer against", "conflicting names out of CTU. :param extdef_map_lines: Contains the id", "compiler wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements", "frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if received_list: # filter out", "as new output with report_directory(args.output, args.keep_empty, args.output_format) as args.output: #", "compiler flags 'arch_list': [], # list of architecture flags 'language':", "# don't run analyzer when compilation fails. or when it's", "phase runs parallel on multiple threads, all compilation units are", "opts['output_dir'] try: cwd = opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] +", "phases and dir. \"\"\" ctu_config = json.loads(ctu_conf_json) # Recover namedtuple", "'.info.txt' file. \"\"\" def extension(): \"\"\" Generate preprocessor file extension.", "process already created it. pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args)", "= opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \\ +", "Everything else is 'Other Error'. error = 'crash' if opts['exit_code']", "multiprocessing.Pool(1 if args.verbose > 2 else None) for current in", "extdef_map_lines: Contains the id of a definition (mangled name) and", "= \"Removing directory '%s' because it contains no report.\" logging.warning(msg,", "only. 
cwd = opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E'] +", "error message from the static analyzer for line in current['error_output']:", "\"\"\" Turns textual external definition map list with source files", "external definition map list with source files into an external", "merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True)", "after the run. If the user asks only for a", "in a determined order. \"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort()", "arguments can mapped to command line arguments of the analyzer.", "good results from static analyzer certain compiler options shall be", "= glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch", "if opts.get('output_failures', False): opts.update(result) continuation(opts) return result except ClangErrorException as", "shall be # omitted. The compiler flag filtering only affects", "arg): pass # and consider everything else as compilation flag.", "1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1, '-init': 1, '-e':", "import defaultdict from libscanbuild import command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build,", "for a in received_list if a not in disabled] if", "write the captured output too with open(name + '.stderr.txt', 'w')", "subprocess Popen.returncode is negative when child terminated # by signal.)", "can mapped to command line arguments of the analyzer. This", "['-x', language] + opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts,", "output with report_directory( args.output, args.keep_empty, args.output_format) as args.output: # Run", "\"\"\" def extension(): \"\"\" Generate preprocessor file extension. 
\"\"\" mapping", "id of a definition (mangled name) and the originating source", "mode. \"\"\" ctu_config = get_ctu_config_from_args(args) # If we do a", "run the analyzer. current = filtered_list.pop() logging.debug('analysis, on arch: %s',", "command = command if isinstance(command, list) else decode(command) logging.debug(\"Run analyzer", "the contract between the caller and the called method.) \"\"\"", "handle.write(get_version(opts['clang'])) handle.close() # write the captured output too with open(name", "CTU. :param extdef_map_lines: Contains the id of a definition (mangled", "the chosen phases and dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze,", "= [opts['clang'], '--analyze'] + opts['direct_args'] \\ + opts['flags'] + [opts['file']]", "by the 'require' decorator. It's like an 'assert' to check", "the analyzer or generate report. To run `scan-build` against the", "Mangled name - AST file pairs. :rtype: List of (str,", "collection # data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If", "analysis command line and executes it. Capture the output of", "# take language elif arg == '-x': result['language'] = next(args)", "in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _ in range(count): next(args)", "Create preprocessor output file name. (This is blindly following the", "results from static analyzer certain compiler options shall be #", "command line 'force_debug', # kill non debug macros 'output_dir', #", "data before and # also after the run. If the", "the report directory as new output with report_directory( args.output, args.keep_empty,", "= os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path)", "group of command line arguments can mapped to command line", "when requested. 
\"\"\" if opts.pop('force_debug'): # lazy implementation just append", "compiler and capture the location for the build process. \"\"\"", "during the build, -- Analyze: run the analyzer against the", "None @require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code']) def", "KeyError('{0} not passed to {1}'.format( key, function.__name__)) return function(*args, **kwargs)", "target(): \"\"\" Creates output file name for reports. \"\"\" if", "to examine bug reports.\" else: msg = \"View result at", "Return true when any excluded directory prefix the filename. \"\"\"", "received. (This is done by the 'require' decorator. It's like", "we do a CTU collect (1st phase) we remove all", "compilation? compilation = split_command(execution.cmd) if compilation is None: return #", "definition maps into a global one. As the collect phase", "generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all lines of input files in", "= 'crash' if opts['exit_code'] < 0 else 'other_error' # Create", "the compiler and capture the location for the build process.", "tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s', name) try: yield name", "step might be necessary, when compiler wrappers are used. That's", "= os.path.splitdrive(path)[1] # Make relative path out of absolute path", "'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',", "analyzer compiler wrapper functionality. 
\"\"\" # don't run analyzer when", "'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd,", "or 'sarif' 'output_failures', # generate crash reports or not 'ctu'])", "opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def", "if key not in args[0]: raise KeyError('{0} not passed to", "not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: # In case an", "'.ii'} return mapping.get(opts['language'], '.i') def destination(): \"\"\" Creates failures directory", "not possible the given method just return and break the", "# so we can leave it empty args.ctu_phases = CtuConfig(collect=True,", "if current is not None: # display error message from", "wrapper. \"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX':", "arg) and not re.match(r'^-Wno-.+', arg): pass # and consider everything", "are coming from args.ctu_dir and extdef_map_cmd, # so we can", "switches filtered_list = [a for a in received_list if a", "ast file) pairs into final file. \"\"\" extern_defs_map_file = os.path.join(ctudir,", "ast_files in mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return", "option will be overwritten '-o': 1, # will set up", "1, '-current_version': 1, '-compatibility_version': 1, '-init': 1, '-e': 1, '-seg1addr':", "Apache License v2.0 with LLVM Exceptions. 
# See https://llvm.org/LICENSE.txt for", "'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\" Find out the language from", "extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False,", "of absolute path = path[1:] if path[0] == os.sep else", "def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is created from the chosen", "def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for key in required:", "an undefine macro at the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']})", "phase of 2 phases of CTU if needed. \"\"\" ctu_config", "through one of the given architectures. \"\"\" disabled = frozenset({'ppc',", "extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition map using '%s'\", extdefmap_command) extdef_src_list", "result except ClangErrorException as ex: result = {'error_output': ex.error, 'exit_code':", "# times). If there are multiple arch are given and", "command line arguments can mapped to command line arguments of", "None, # compilation language, None, if not specified 'compiler': compiler_language(command)", "is # left so multiple analyze runs can use the", "empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases", "into a separate basket if arg == '-arch': result['arch_list'].append(next(args)) #", "'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call", "def exclude(filename, directory): \"\"\" Return true when any excluded directory", "against the captured commands. if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run", "absolute paths. 
filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory,", "given method just return and break the chain. The passed", "static analyzer run. # # Keys are the option name,", "ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path =", "possible the given method just return and break the chain.", "'--analyze'] + opts['direct_args'] + opts['flags'] + [opts['file'], '-o', target()], cwd)", "== '-x': result['language'] = next(args) # parameters which looks source", "relative to directory. Need to turn # it to absolute", "in pool.imap_unordered(run, generator): if current is not None: # display", "is the preprocessor output. The output filename generated randomly. The", "currently does not support sarif format. msg = \"Run 'scan-view", "import datetime import shutil import glob from collections import defaultdict", "open(extern_defs_map_file, 'w') as out_file: for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s", "in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters) current =", "commands, -- Report: create a cover report from the analyzer", "report_directory( args.output, args.keep_empty, args.output_format) as args.output: # Run against a", "os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: # In case an other", "it should be silent and no need to run the", "__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX =", "dir=parent_dir) logging.info('Report directory created: %s', name) try: yield name finally:", "arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file: for mangled_name, ast_file", "of CTU if needed. 
\"\"\" ctu_config = opts['ctu'] if ctu_config.collect", "ctu_config.analyze: cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args']", "next(args) # parameters which looks source file are not flags", "CTU if needed. \"\"\" ctu_config = opts['ctu'] if ctu_config.collect or", "libscanbuild.compilation import split_command, classify_source, \\ compiler_language from libscanbuild.clang import get_version,", "needed. \"\"\" ctu_config = opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert", "return ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory'] cmd = [opts['clang'],", "!= 'sarif': # 'scan-view' currently does not support sarif format.", "definition map file for the current source. \"\"\" args =", "WITH LLVM-exception \"\"\" This module implements the 'scan-build' command API.", "will re-assign the report directory as new output with report_directory(args.output,", "project configure step, it should be silent and no need", "continuation=run_analyzer): \"\"\" Execute only one phase of 2 phases of", "parent directory of the output directory. keep -- a boolean", "run. # # Keys are the option name, value number", "extdef_ast_list.append(mangled_name + \" \" + ast_path) return extdef_ast_list @require(['clang', 'directory',", "mangled_ast_pairs): \"\"\" Write (mangled name, ast file) pairs into final", "out the language from command line parameters or file name", "those should not change the pre-processing step. # But that's", "logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): \"\"\" Governs multiple runs in", "# Make relative path out of absolute path = path[1:]", "under the Apache License v2.0 with LLVM Exceptions. # See", "if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build command and analyzer", "between the caller and the called method.) 
\"\"\" try: command", "decorator @require(['command', # entry from compilation database 'directory', # entry", "# language can be given as a parameter... language =", "if isinstance(command, list) else decode(command) logging.debug(\"Run analyzer against '%s'\", command)", "the LLVM Project, under the Apache License v2.0 with LLVM", "a temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the", "= path[1:] if path[0] == os.sep else path ast_path =", "report directory as new output with report_directory( args.output, args.keep_empty, args.output_format)", "take language elif arg == '-x': result['language'] = next(args) #", "1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name, ast_files in mangled_to_asts.items():", "might be necessary, when compiler wrappers are used. That's the", "and the originating source (the corresponding AST file) name. :type", "for mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name,", "the report directory. hint -- could specify the parent directory", "os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if", "'-o': 1, # will set up own output file #", "multiprocessing import tempfile import functools import subprocess import contextlib import", "utf-8 -*- # Part of the LLVM Project, under the", "CTU analysis. \"\"\" def generate_ast(triple_arch): \"\"\" Generates ASTs for the", "\"\"\" It assembles the analysis command line and executes it.", "Generate external definition map file for the current source. \"\"\"", "the same data gathered by a single # collection run.", "If the analysis is not possible the given method just", "2 else None) for current in pool.imap_unordered(run, generator): if current", "generator): if current is not None: # display error message", "blindly following the # Perl implementation.) 
(handle, name) = tempfile.mkstemp(suffix=extension(),", "state. It checks the required attributes in the passed state", "option will be overwritten '-fsyntax-only': 0, # static analyzer option", "(-x) and architecture (-arch) flags for future processing. \"\"\" result", "'-compatibility_version': 1, '-init': 1, '-e': 1, '-seg1addr': 1, '-bundle_loader': 1,", "files: with open(filename, 'r') as in_file: for line in in_file:", "cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException: pass #", "\"\"\" Takes iterator of individual external definition maps and creates", "report_failure(opts): \"\"\" Create report when analyzer failed. The major report", "\"\"\" if opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}: (handle, name)", "'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name", "language not supported') return None else: logging.debug('analysis, language: %s', language)", "runs (collect or analyze) are launched from here. run_analyzer_parallel(args) if", "arch_check(opts) except Exception: logging.error(\"Problem occurred during analysis.\", exc_info=1) return None", "COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT':", "def target(): \"\"\" Creates output file name for reports. \"\"\"", "required. 
But we need to set up everything for the", "opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating", "find out from source file extension if language is None", "'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\" It assembles the", "= classify_source(opts['file'], compiler == 'c') if language is None: logging.debug('skip", "the compiler invocation. \"\"\" accepted = frozenset({ 'c', 'c++', 'objective-c',", "given architectures. \"\"\" disabled = frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list')", "subprocess import contextlib import datetime import shutil import glob from", "@require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only one phase of", "'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language can be given as", "As the collect phase runs parallel on multiple threads, all", "and `analyze-c++` compiler wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution):", "status as it was requested. return number_of_bugs if args.status_bugs else", "\\ ClangErrorException from libscanbuild.shell import decode __all__ = ['scan_build', 'analyze_build',", "0} except subprocess.CalledProcessError as ex: result = {'error_output': ex.output, 'exit_code':", "\"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer compiler", "def run_analyzer_parallel(args): \"\"\" Runs the analyzer against the given compilation", "if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if", "\"\"\" Iterate over all lines of input files in a", "all data needed by CTU analysis. 
\"\"\" def generate_ast(triple_arch): \"\"\"", ":returns: Mangled name - AST file pairs. :rtype: List of", "\", 1) # Normalize path on windows as well path", "suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return opts['output_dir'] try: cwd =", "function(*args, **kwargs) return wrapper return decorator @require(['command', # entry from", "external definition map file for the current source. \"\"\" args", "not support sarif format. msg = \"Run 'scan-view %s' to", "report from the analyzer outputs. \"\"\" import re import os", "the CC/CXX values # for the Makefile. if args.intercept_first: #", "out_file.write('%s %s\\n' % (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for", "os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c']", "\"\"\" Execute only one phase of 2 phases of CTU", "= opts.pop('compiler') # ... or find out from source file", "directory '%s' because it contains no report.\" logging.warning(msg, name) if", "it. Capture the output of the analysis and returns with", "run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept", "continuation to generate it. \"\"\" def target(): \"\"\" Creates output", "and compiler is not None: language = classify_source(opts['file'], compiler ==", "language elif arg == '-x': result['language'] = next(args) # parameters", "}) return environment @command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry point for", "because 'configure' needs to capture the CC/CXX values # for", "% (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path in", "needed parameters received. (This is done by the 'require' decorator.", "(the corresponding AST file) name. 
:type extdef_map_lines: Iterator of str.", "cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only", "at the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language',", "separate basket if arg == '-arch': result['arch_list'].append(next(args)) # take language", "output directory. keep -- a boolean value to keep or", "COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point", "there are multiple arch are given and are not #", "return continuation(opts) else: logging.debug('skip analysis, found not supported arch') return", "'directory', 'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess source", "Run build command with intercept module. exit_code = capture(args) #", "will re-assign the report directory as new output with report_directory(", "\\ parse_args_for_analyze_build from libscanbuild.intercept import capture from libscanbuild.report import document", "return any(re.match(r'^' + exclude_directory, filename) for exclude_directory in args.excludes) consts", "don't want to see. elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+',", "location for the build process. \"\"\" return len(args) and not", "step, it should be silent and no need to run", "data gathered by a single # collection run. if ctu_config.collect", "requested. 
if result or not os.getenv('ANALYZE_BUILD_CLANG'): return # check is", "opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition", "compilation language, None, if not specified 'compiler': compiler_language(command) # 'c'", "'--serialize-diagnostics': 1 } def classify_parameters(command): \"\"\" Prepare compiler flags (filters", "from collections import defaultdict from libscanbuild import command_entry_point, compiler_wrapper, \\", "path) 'direct_args', # arguments from command line 'force_debug', # kill", "display error message from the static analyzer if current is", "len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\"", "# See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH", "-- Report: create a cover report from the analyzer outputs.", "'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer against the compilation", "ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory'] cmd =", "Implements analyzer compiler wrapper functionality. 
\"\"\" # don't run analyzer", "logging.debug('skip analysis, found not supported arch') return None else: logging.debug('analysis,", "= capture(args) # Run the analyzer against the captured commands.", "information about the crash with open(name + '.info.txt', 'w') as", "None: logging.debug('skip analysis, language not known') return None elif language", "\"\"\" Entry point to run (or not) static analyzer against", "libscanbuild.intercept import capture from libscanbuild.report import document from libscanbuild.compilation import", "are not flags elif re.match(r'^[^-].+', arg) and classify_source(arg): pass #", "None and compiler is not None: language = classify_source(opts['file'], compiler", "context also saved into '.info.txt' file. \"\"\" def extension(): \"\"\"", "'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\" Entry point for scan-build command.", "= get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException:", "or find out from source file extension if language is", "continuation=language_check): \"\"\" Do run analyzer through one of the given", "and -arch switches filtered_list = [a for a in received_list", "the analyzer against the captured commands. if need_analyzer(args.build): govern_analyzer_runs(args) else:", "analyze_build(): \"\"\" Entry point for analyze-build command. \"\"\" args =", "'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',", "examine bug reports.\" else: msg = \"View result at %s/results-merged.sarif.\"", "out nondebug macros when requested. 
\"\"\" if opts.pop('force_debug'): # lazy", "opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter", "COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER", "result['arch_list'].append(next(args)) # take language elif arg == '-x': result['language'] =", "asked for a collect (1st) and analyze (2nd) phase, we", "be # omitted. The compiler flag filtering only affects the", "static analyzer for line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def", "os.makedirs(failures_dir) return failures_dir # Classify error type: when Clang terminated", "checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker)", "piece in pieces for elem in [constant, piece]] def get_ctu_config_from_args(args):", "generating all data needed by CTU analysis. \"\"\" def generate_ast(triple_arch):", "But that's the only pass we have before run the", "opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if", "file name. (This is blindly following the # Perl implementation.)", "path[1:] if path[0] == os.sep else path ast_path = os.path.join(\"ast\",", "the mangled names and the source (AST generated from the", "output filename generated randomly. 
The compiler output also captured into", "os.makedirs(ast_dir) except OSError: # In case an other process already", "# call static analyzer against the compilation for source in", "cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] +", "import shutil import glob from collections import defaultdict from libscanbuild", "an # all-in-one run where we deliberately remove collection data", "from libscanbuild.compilation import split_command, classify_source, \\ compiler_language from libscanbuild.clang import", "triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches: if os.path.isdir(triple_path):", "because it contains no report.\" logging.warning(msg, name) if not keep:", "global one. As the collect phase runs parallel on multiple", "Run the analyzer against the captured commands. if need_analyzer(args.build): govern_analyzer_runs(args)", "parse_args_for_analyze_build() # will re-assign the report directory as new output", "already created it. pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w')", "from the perl implementation. '-g': 0, '-save-temps': 0, '-install_name': 1,", "opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file', 'flags'])", "the analyzer against a compilation db. govern_analyzer_runs(args) # Cover report", "key, function.__name__)) return function(*args, **kwargs) return wrapper return decorator @require(['command',", "the original sequence and the odd elements are the prefix.", "The output filename generated randomly. The compiler output also captured", "'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts): \"\"\" Create", "def govern_analyzer_runs(args): \"\"\" Governs multiple runs in CTU mode or", "times). If there are multiple arch are given and are", "when analyzer failed. 
The major report is the preprocessor output.", "from args.ctu_dir and extdef_map_cmd, # so we can leave it", "out from source file extension if language is None and", "contains no report.\" logging.warning(msg, name) if not keep: os.rmdir(name) def", "requested. \"\"\" if opts.pop('force_debug'): # lazy implementation just append an", "hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\"", "about the crash with open(name + '.info.txt', 'w') as handle:", "args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker:", "functionality. \"\"\" # don't run analyzer when compilation fails. or", "for the Makefile. if args.intercept_first: # Run build command with", "= tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_', dir=destination()) os.close(handle) #", "iterate on the compile options args = iter(command[1:]) for arg", "negative when child terminated # by signal.) Everything else is", "if opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}: (handle, name) =", "arch: %s', current) opts.update({'flags': ['-arch', current] + opts['flags']}) return continuation(opts)", "own output file # flags below are inherited from the", "classify_source(arg): pass # ignore some flags elif arg in IGNORED_FLAGS:", "flags for future processing. \"\"\" result = { 'flags': [],", "build command and analyzer with compiler wrappers. environment = setup_environment(args)", "name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return opts['output_dir']", "cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException: pass # write general", "\"\"\" Decorator for checking the required values in state. 
It", "report.\" logging.warning(msg, name) if not keep: os.rmdir(name) def analyzer_params(args): \"\"\"", "report is the preprocessor output. The output filename generated randomly.", "'directory', # entry from compilation database 'file', # entry from", "\"Run 'scan-view %s' to examine bug reports.\" else: msg =", "will be overwritten '-fsyntax-only': 0, # static analyzer option will", "elif opts['output_format'] == 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir'])", "Run the analyzer against a compilation db. govern_analyzer_runs(args) # Cover", "= ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker',", "extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition map using '%s'\", extdefmap_command)", "re-assign the report directory as new output with report_directory(args.output, args.keep_empty,", "if not exits yet. \"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures') if", "passed state and stop when any of those is missing.", "file for the current source. \"\"\" args = opts['direct_args'] +", "temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled", "or when it's not requested. if result or not os.getenv('ANALYZE_BUILD_CLANG'):", "and bug counting. number_of_bugs = document(args) # Set exit status", "are inherited from the perl implementation. '-g': 0, '-save-temps': 0,", "cases, when analyzer run # is not required. 
But we", "elif arg == '-x': result['language'] = next(args) # parameters which", "as out_file: for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\\n' %", "\"\"\" args = parse_args_for_analyze_build() # will re-assign the report directory", "language = classify_source(opts['file'], compiler == 'c') if language is None:", "element is from the original sequence and the odd elements", "a determined order. \"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for", "files shall go 'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file',", "do a CTU collect (1st phase) we remove all previous", "command. there are cases, when analyzer run # is not", "logging.info('Report directory created: %s', name) try: yield name finally: if", "name. :type extdef_map_lines: Iterator of str. :returns: Mangled name -", "= tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s', name) try: yield", "def arch_check(opts, continuation=language_check): \"\"\" Do run analyzer through one of", "analyzer against the captured commands, -- Report: create a cover", "path[0] == os.sep else path ast_path = os.path.join(\"ast\", path +", "compilation database') with open(args.cdb, 'r') as handle: generator = (dict(cmd,", "'plist', 'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle)", "\"\"\" import re import os import os.path import json import", "finally: if os.listdir(name): if output_format != 'sarif': # 'scan-view' currently", "Iterate over all lines of input files in a determined", "open(name + '.info.txt', 'w') as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_',", "'plist-multi-file', or 'sarif' 'output_failures', # generate crash reports or not", "'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language can be", "a in 
received_list if a not in disabled] if filtered_list:", "of the LLVM Project, under the Apache License v2.0 with", "# If the user asked for a collect (1st) and", "get_ctu_config_from_args(args) # If we do a CTU collect (1st phase)", "to run (or not) static analyzer against a single entry", "These definition maps contain the mangled names and the source", "current) opts.update({'flags': ['-arch', current] + opts['flags']}) return continuation(opts) else: logging.debug('skip", "= IGNORED_FLAGS[arg] for _ in range(count): next(args) # we don't", "in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): \"\"\" Governs multiple", "the prefix. eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2,", "all previous collection # data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True)", "handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def", "2, 0, 3] \"\"\" return [elem for piece in pieces", "compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters) current = run(parameters)", "if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: # In case", "\"\"\" Creates failures directory if not exits yet. \"\"\" failures_dir", "'.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args',", "also influenced by the compiler invocation. 
\"\"\" accepted = frozenset({", "+ \\ [opts['file'], '-o', name] try: cmd = get_arguments(cmd, cwd)", "os.makedirs(extern_defs_map_folder) except OSError: # In case an other process already", "parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept import capture from libscanbuild.report import", "with open(extern_defs_map_file, 'w') as out_file: for mangled_name, ast_file in mangled_ast_pairs:", "cwd=cwd) return {'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError as ex:", "Execute Clang again, but run the syntax check only. cwd", "not # the same, those should not change the pre-processing", "cwd) output = run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code': 0}", "'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),", "we do an # all-in-one run where we deliberately remove", "decode(command) logging.debug(\"Run analyzer against '%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts) except", "against a build is done in multiple steps: -- Intercept:", "run_build(args.build, env=environment) # Cover report generation and bug counting. number_of_bugs", "'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build():", "ast_path = os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name + \" \"", "\"\"\" This module implements the 'scan-build' command API. To run", "\"\"\" Runs the analyzer against the given compilation database. 
\"\"\"", "# Normalize path on windows as well path = os.path.splitdrive(path)[1]", "# it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', #", "name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_', dir=destination()) os.close(handle)", "def analyze_compiler_wrapper(): \"\"\" Entry point for `analyze-cc` and `analyze-c++` compiler", "any(re.match(r'^' + exclude_directory, filename) for exclude_directory in args.excludes) consts =", "bug counting. number_of_bugs = document(args) # Set exit status as", "return name elif opts['output_format'] == 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-',", "against compilation database') with open(args.cdb, 'r') as handle: generator =", "def destination(): \"\"\" Creates failures directory if not exits yet.", "!= ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd =", "opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \\ + opts['flags']", "'directory', 'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts): \"\"\"", "not change the pre-processing step. # But that's the only", "dir=opts['output_dir']) os.close(handle) return name elif opts['output_format'] == 'sarif': (handle, name)", "cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \\ +", "a single phase data is # left so multiple analyze", "cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except", "map keeping only unique names. 
We leave conflicting names out", "== 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge", "= {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i')", "analyzer option will be overwritten '-o': 1, # will set", "task is decomposed into smaller methods which are calling each", "os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command':", "result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop',", "# check is it a compilation? compilation = split_command(execution.cmd) if", "ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set up environment for build", "to interpose compiler wrapper. \"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({", "a single entry of the compilation database. This complex task", "if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='',", "report files shall go 'output_format', # it's 'plist', 'html', 'plist-html',", "@command_entry_point def scan_build(): \"\"\" Entry point for scan-build command. \"\"\"", "required attributes in the passed state and stop when any", "compilation units are separately mapped into a temporary file in", "key in required: if key not in args[0]: raise KeyError('{0}", "the called method.) 
\"\"\" try: command = opts.pop('command') command =", "extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove", "passed parameter is a python dictionary. Each method first check", "are the option name, value number of options to skip", "command with intercept module. exit_code = capture(args) # Run the", "the report directory as new output with report_directory(args.output, args.keep_empty, args.output_format)", "= 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def", "language_check(opts, continuation=filter_debug_flags): \"\"\" Find out the language from command line", "since 'args.excludes' are absolute paths. filename = os.path.normpath(os.path.join(directory, filename)) return", "%s/results-merged.sarif.\" keep = True else: if keep: msg = \"Report", "report. To run `scan-build` against the configure step might be", "'ctu']) # ctu control options def run(opts): \"\"\" Entry point", "return arch_check(opts) except Exception: logging.error(\"Problem occurred during analysis.\", exc_info=1) return", "command during the build, -- Analyze: run the analyzer against", "file. And some more execution context also saved into '.info.txt'", "os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))", "ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for", "no need to run the analyzer or generate report. To", "\"\"\" Creates output file name for reports. \"\"\" if opts['output_format']", "output file name. 
(This is blindly following the # Perl", "= [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST", "pass # write general information about the crash with open(name", "If there are multiple arch are given and are not", "arch') return None else: logging.debug('analysis, on default arch') return continuation(opts)", "'.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the captured output", "by CTU analysis. \"\"\" def generate_ast(triple_arch): \"\"\" Generates ASTs for", "mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external definition maps into", "into a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate", "args.output: # Run against a build command. there are cases,", "args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if", "\\ + [opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu'])", "it contains no report.\" logging.warning(msg, name) if not keep: os.rmdir(name)", "extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs (collect or", "# entry from compilation database 'directory', # entry from compilation", "None elif language not in accepted: logging.debug('skip analysis, language not", "# compilation language, None, if not specified 'compiler': compiler_language(command) #", "database') with open(args.cdb, 'r') as handle: generator = (dict(cmd, **consts)", "create a cover report from the analyzer outputs. 
\"\"\" import", "def analyzer_params(args): \"\"\" A group of command line arguments can", "return number_of_bugs if args.status_bugs else exit_code @command_entry_point def analyze_build(): \"\"\"", "mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name, ast_files in mangled_to_asts.items(): if", "only one arch given (or the same multiple # times).", "Merge individual external definition maps into a global one. As", "import tempfile import functools import subprocess import contextlib import datetime", "created: %s', name) try: yield name finally: if os.listdir(name): if", "analyzer against a single entry of the compilation database. This", "before and # also after the run. If the user", "elem in [constant, piece]] def get_ctu_config_from_args(args): \"\"\" CTU configuration is", "if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker',", "into smaller methods which are calling each other in chain.", "new output with report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run", "the configure step might be necessary, when compiler wrappers are", "ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory'] cmd = [opts['clang'], '--analyze']", "from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def", "also saved into '.info.txt' file. \"\"\" def extension(): \"\"\" Generate", "# for the Makefile. 
if args.intercept_first: # Run build command", "args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug':", "everything for the # wrappers, because 'configure' needs to capture", "exit_code = run_build(args.build, env=environment) # Cover report generation and bug", "either absolute or relative to directory. Need to turn #", "extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file:", "for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format):", "ctu_config = opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect !=", "if needed. \"\"\" ctu_config = opts['ctu'] if ctu_config.collect or ctu_config.analyze:", "map list with ast files. \"\"\" extdef_ast_list = [] for", "analyzer outputs. \"\"\" import re import os import os.path import", "line and executes it. Capture the output of the analysis", "IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _ in range(count): next(args) #", "directory '%s' contains no report, but kept.\" else: msg =", "path out of absolute path = path[1:] if path[0] ==", "run. if ctu_config.collect and ctu_config.analyze: # CTU strings are coming", "pass # and consider everything else as compilation flag. else:", "compilation db. govern_analyzer_runs(args) # Cover report generation and bug counting.", "import subprocess import contextlib import datetime import shutil import glob", "report_directory(hint, keep, output_format): \"\"\" Responsible for the report directory. hint", "file # flags below are inherited from the perl implementation.", "intent of the build command. When static analyzer run against", "name - AST file pairs. :rtype: List of (str, str)", "warnings, but we should suppress ones # that we don't", "are calling each other in chain. 
If the analysis is", "not in accepted: logging.debug('skip analysis, language not supported') return None", "logging.debug('analysis, language: %s', language) opts.update({'language': language, 'flags': ['-x', language] +", "Generates ASTs for the current compilation command. \"\"\" args =", "prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] \"\"\"", "the given compilation database. \"\"\" def exclude(filename, directory): \"\"\" Return", "the user asks only for a single phase data is", "if opts.get('output_failures', False): opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\"", "exit_code = capture(args) # Run the analyzer against the captured", "'-arch': result['arch_list'].append(next(args)) # take language elif arg == '-x': result['language']", "and analyze (2nd) phase, we do an # all-in-one run", "args.ctu_dir and extdef_map_cmd, # so we can leave it empty", "for line in in_file: yield line def write_global_map(arch, mangled_ast_pairs): \"\"\"", "Execute only one phase of 2 phases of CTU if", "# will set up own output file # flags below", "contract between the caller and the called method.) \"\"\" try:", "on default arch') return continuation(opts) # To have good results", "None else: logging.debug('analysis, language: %s', language) opts.update({'language': language, 'flags': ['-x',", "'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\" It", "command. When static analyzer run against project configure step, it", "re.match(r'^[^-].+', arg) and classify_source(arg): pass # ignore some flags elif", "def run(opts): \"\"\" Entry point to run (or not) static", "analysis and returns with it. If failure reports are requested,", "Creates output file name for reports. 
\"\"\" if opts['output_format'] in", "keep: msg = \"Report directory '%s' contains no report, but", "not keep: os.rmdir(name) def analyzer_params(args): \"\"\" A group of command", "state and stop when any of those is missing. \"\"\"", "change the pre-processing step. # But that's the only pass", "0 else 'other_error' # Create preprocessor output file name. (This", "extdefmap_command.extend(args) logging.debug(\"Generating external definition map using '%s'\", extdefmap_command) extdef_src_list =", "which had their definition. These files should be merged at", "Takes iterator of individual external definition maps and creates a", "passed to {1}'.format( key, function.__name__)) return function(*args, **kwargs) return wrapper", "name for reports. \"\"\" if opts['output_format'] in { 'plist', 'plist-html',", "created from the chosen phases and dir. \"\"\" return (", "Project, under the Apache License v2.0 with LLVM Exceptions. #", "by a signal it's a 'Crash'. # (python subprocess Popen.returncode", "( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases,", "want to see. elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):", "\"\"\" Responsible for the report directory. hint -- could specify", "pieces for elem in [constant, piece]] def get_ctu_config_from_args(args): \"\"\" CTU", "in args[0]: raise KeyError('{0} not passed to {1}'.format( key, function.__name__))", "map list with source files into an external definition map", "In case an other process already created it. 
pass if", "args = parse_args_for_scan_build() # will re-assign the report directory as", "extdef_map_lines: mangled_name, ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs =", "'*')) files.sort() for filename in files: with open(filename, 'r') as", "for a single phase data is # left so multiple", "run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError as", "'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\" It assembles the analysis", "There should be only one arch given (or the same", "fails. or when it's not requested. if result or not", "exit status as it was requested. return number_of_bugs if args.status_bugs", "files should be merged at the end into a global", "return function(*args, **kwargs) return wrapper return decorator @require(['command', # entry", "extra warnings, but we should suppress ones # that we", "and not re.match(r'^-Wno-.+', arg): pass # and consider everything else", "one arch given (or the same multiple # times). If", "given as a parameter... 
language = opts.pop('language') compiler = opts.pop('compiler')", "checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def require(required):", "[opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using", "import multiprocessing import tempfile import functools import subprocess import contextlib", "args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def", "keep = True else: if keep: msg = \"Report directory", "\"Report directory '%s' contains no report, but kept.\" else: msg", "args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins))", "stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir)", "shall go 'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or", "= [a for a in received_list if a not in", "1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics':", "output with report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run the", "to set up everything for the # wrappers, because 'configure'", "not None: language = classify_source(opts['file'], compiler == 'c') if language", "'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\"", "'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options)", "options to skip 
IGNORED_FLAGS = { '-c': 0, # compile", "database 'file', # entry from compilation database 'clang', # clang", "opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external", "[opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir,", "len(args) and not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): \"\"\" From", "pool.close() pool.join() def govern_analyzer_runs(args): \"\"\" Governs multiple runs in CTU", "not) static analyzer against a single entry of the compilation", "%s', current) opts.update({'flags': ['-arch', current] + opts['flags']}) return continuation(opts) else:", "cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \\", "database 'directory', # entry from compilation database 'file', # entry", "returns with it. If failure reports are requested, it calls", "'direct_args', # arguments from command line 'force_debug', # kill non", "with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd", "end into a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\"", "as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',", "args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers])", "and extdef_map_cmd, # so we can leave it empty args.ctu_phases", "dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is created from", "ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user asked for a", "are launched from here. 
run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args):", "triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines =", "compilation database 'directory', # entry from compilation database 'file', #", "compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer compiler wrapper functionality.", "args.status_bugs else exit_code @command_entry_point def analyze_build(): \"\"\" Entry point for", "'--param': 1, '--serialize-diagnostics': 1 } def classify_parameters(command): \"\"\" Prepare compiler", "we have before run the analyzer. current = filtered_list.pop() logging.debug('analysis,", "@contextlib.contextmanager def report_directory(hint, keep, output_format): \"\"\" Responsible for the report", "in CTU mode or runs once in normal mode. \"\"\"", "a 'Crash'. # (python subprocess Popen.returncode is negative when child", "multiple # times). If there are multiple arch are given", "analyzer if current is not None: for line in current['error_output']:", "failures_dir = os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir", "architecture (-arch) flags for future processing. \"\"\" result = {", "ex: result = {'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False):", "yield line def write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled name, ast", "if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)])", "def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer compiler wrapper functionality. \"\"\"", "method generates those. 
\"\"\" result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model))", "files into an external definition map list with ast files.", "extdef_src_list: mangled_name, path = extdef_src_txt.split(\" \", 1) # Normalize path", "file) pairs into final file. \"\"\" extern_defs_map_file = os.path.join(ctudir, arch,", "The decision also influenced by the compiler invocation. \"\"\" accepted", "- AST file pairs. :rtype: List of (str, str) tuples.", "if result or not os.getenv('ANALYZE_BUILD_CLANG'): return # check is it", "a definition (mangled name) and the originating source (the corresponding", "opts['flags'] + [opts['file'], '-o', target()], cwd) output = run_command(cmd, cwd=cwd)", "\"\"\" Set up environment for build command to interpose compiler", "source}) logging.debug('analyzer parameters %s', parameters) current = run(parameters) # display", "setup_environment(args): \"\"\" Set up environment for build command to interpose", "use the same data gathered by a single # collection", "but kept.\" else: msg = \"Removing directory '%s' because it", "every second element is from the original sequence and the", "output = run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code': 0} except", "directory. keep -- a boolean value to keep or delete", "wrappers are used. That's the moment when build setup check", "{ 'plist', 'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir'])", "# Single runs (collect or analyze) are launched from here.", "analyzer certain compiler options shall be # omitted. 
The compiler", "we can leave it empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='',", "Recover namedtuple from json when coming from analyze-cc or analyze-c++", "occurred during analysis.\", exc_info=1) return None @require(['clang', 'directory', 'flags', 'file',", "checks the required attributes in the passed state and stop", "run the static analyzer against a build is done in", "# Perl implementation.) (handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error", "\"\\n\") cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args']", "arch') return continuation(opts) # To have good results from static", "def ctu_collect_phase(opts): \"\"\" Preprocess source by generating all data needed", "current source. \"\"\" args = opts['direct_args'] + opts['flags'] extdefmap_command =", "IGNORED_FLAGS[arg] for _ in range(count): next(args) # we don't care", "get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException: pass", "} def classify_parameters(command): \"\"\" Prepare compiler flags (filters some and", "if not specified 'compiler': compiler_language(command) # 'c' or 'c++' }", "multiple arch are given and are not # the same,", "a global one. 
As the collect phase runs parallel on", "result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker)", "source file are not flags elif re.match(r'^[^-].+', arg) and classify_source(arg):", "'sarif' 'output_failures', # generate crash reports or not 'ctu']) #", "in [constant, piece]] def get_ctu_config_from_args(args): \"\"\" CTU configuration is created", "and take out language (-x) and architecture (-arch) flags for", "analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return", "args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu':", "else is 'Other Error'. error = 'crash' if opts['exit_code'] <", "%s', parameters) current = run(parameters) # display error message from", "from static analyzer certain compiler options shall be # omitted.", "os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify", "ClangErrorException: pass # write general information about the crash with", "the user asked for a collect (1st) and analyze (2nd)", "list of architecture flags 'language': None, # compilation language, None,", "ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result except ClangErrorException", "= ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options)", "executes it. Capture the output of the analysis and returns", "libscanbuild.clang import get_version, get_arguments, get_triple_arch, \\ ClangErrorException from libscanbuild.shell import", "AST file pairs. :rtype: List of (str, str) tuples. 
\"\"\"", "triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs)", "Set up environment for build command to interpose compiler wrapper.", "os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except", "exclude_directory in args.excludes) consts = { 'clang': args.clang, 'output_dir': args.output,", "directory created: %s', name) try: yield name finally: if os.listdir(name):", "From a sequence create another sequence where every second element", "skip IGNORED_FLAGS = { '-c': 0, # compile option will", "(collect or analyze) are launched from here. run_analyzer_parallel(args) if ctu_config.collect:", "language (-x) and architecture (-arch) flags for future processing. \"\"\"", "opts['exit_code'] < 0 else 'other_error' # Create preprocessor output file", "os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory':", "ignore some flags elif arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg]", "it a compilation? compilation = split_command(execution.cmd) if compilation is None:", "raise KeyError('{0} not passed to {1}'.format( key, function.__name__)) return function(*args,", "if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER)", "as ex: result = {'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures',", "of architecture flags 'language': None, # compilation language, None, if", "return # check is it a compilation? 
compilation = split_command(execution.cmd)", "return wrapper return decorator @require(['command', # entry from compilation database", "@require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out nondebug macros", "0, # compile option will be overwritten '-fsyntax-only': 0, #", "(or not) static analyzer against a single entry of the", "against the given compilation database. \"\"\" def exclude(filename, directory): \"\"\"", "'-fsyntax-only', '-E'] + opts['flags'] + \\ [opts['file'], '-o', name] try:", "'scan-view' currently does not support sarif format. msg = \"Run", "if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result)", "def report_failure(opts): \"\"\" Create report when analyzer failed. The major", "These files should be merged at the end into a", "args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug", "too with open(name + '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close()", "overwritten '-fsyntax-only': 0, # static analyzer option will be overwritten", "'%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error(\"Problem occurred during", "not known') return None elif language not in accepted: logging.debug('skip", "-- a boolean value to keep or delete the empty", "with report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run the analyzer", "the chosen phases and dir. \"\"\" ctu_config = json.loads(ctu_conf_json) #", "entry from compilation database 'file', # entry from compilation database", "same, those should not change the pre-processing step. # But", "is either absolute or relative to directory. 
Need to turn", "run the analyzer against the captured commands, -- Report: create", "list with source files into an external definition map list", "extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external definition map list with source", "iterator of individual external definition maps and creates a global", "originating source (the corresponding AST file) name. :type extdef_map_lines: Iterator", "analyzer run # is not required. But we need to", "the current source. \"\"\" args = opts['direct_args'] + opts['flags'] extdefmap_command", "and architecture (-arch) flags for future processing. \"\"\" result =", "subprocess.CalledProcessError: pass except ClangErrorException: pass # write general information about", "Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs", "# Create preprocessor output file name. (This is blindly following", "for the # wrappers, because 'configure' needs to capture the", "\"\"\" def exclude(filename, directory): \"\"\" Return true when any excluded", "'scan-build' command API. To run the static analyzer against a", "for key in required: if key not in args[0]: raise", "during analysis.\", exc_info=1) return None @require(['clang', 'directory', 'flags', 'file', 'output_dir',", "filtered_list = [a for a in received_list if a not", "API. 
To run the static analyzer against a build is", "hint -- could specify the parent directory of the output", "# Keys are the option name, value number of options", "macros 'output_dir', # where generated report files shall go 'output_format',", "cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if", "up own output file # flags below are inherited from", "'language': None, # compilation language, None, if not specified 'compiler':", "Apache-2.0 WITH LLVM-exception \"\"\" This module implements the 'scan-build' command", "It checks the required attributes in the passed state and", "turn # it to absolute since 'args.excludes' are absolute paths.", "'require' decorator. It's like an 'assert' to check the contract", "compiler = opts.pop('compiler') # ... or find out from source", "previous collection # data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) #", "analyzer. current = filtered_list.pop() logging.debug('analysis, on arch: %s', current) opts.update({'flags':", "are the prefix. 
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0,", "have good results from static analyzer certain compiler options shall", "+ opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): \"\"\"", "analyzer against a build is done in multiple steps: --", "ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch):", "def map_extdefs(triple_arch): \"\"\" Generate external definition map file for the", "elif arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _ in", "from json when coming from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0],", "an 'assert' to check the contract between the caller and", "in mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir,", "CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is", "'Other Error'. error = 'crash' if opts['exit_code'] < 0 else", "+ \" \" + ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags',", "'.stderr.txt' file. And some more execution context also saved into", "against a build command. there are cases, when analyzer run", "'', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else", "extension. The decision also influenced by the compiler invocation. \"\"\"", "next(args) # we don't care about extra warnings, but we", "Run against a build command. 
there are cases, when analyzer", "with open(args.cdb, 'r') as handle: generator = (dict(cmd, **consts) for", "no report.\" logging.warning(msg, name) if not keep: os.rmdir(name) def analyzer_params(args):", "\"\"\" Preprocess source by generating all data needed by CTU", "if a not in disabled] if filtered_list: # There should", "if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format))", "analyzer with compiler wrappers. environment = setup_environment(args) exit_code = run_build(args.build,", "and no need to run the analyzer or generate report.", "build command. When static analyzer run against project configure step,", "import capture from libscanbuild.report import document from libscanbuild.compilation import split_command,", "for piece in pieces for elem in [constant, piece]] def", "entry from compilation database 'directory', # entry from compilation database", "analyzer against '%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error(\"Problem", "for reports. \"\"\" if opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}:", "external definition map list with ast files. \"\"\" extdef_ast_list =", "corresponding AST file) name. :type extdef_map_lines: Iterator of str. :returns:", "\"\"\" Generate preprocessor file extension. \"\"\" mapping = {'objective-c++': '.mii',", "or 'c++' } # iterate on the compile options args", "and consider everything else as compilation flag. else: result['flags'].append(arg) return", "command line arguments of the analyzer. This method generates those.", "affects the static analyzer run. 
# # Keys are the", "be silent and no need to run the analyzer or", "extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\")", "os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file: for mangled_name,", "triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer):", "invocation. \"\"\" accepted = frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',", "'', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def analyze_compiler_wrapper(): \"\"\"", "+ opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external", "tuples. \"\"\" mangled_to_asts = defaultdict(set) for line in extdef_map_lines: mangled_name,", "maps and creates a global map keeping only unique names.", "return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator", "run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs (collect or analyze)", "failure reports are requested, it calls the continuation to generate", "pass we have before run the analyzer. current = filtered_list.pop()", "filtering only affects the static analyzer run. # # Keys", "opts['flags'] + \\ [opts['file'], '-o', name] try: cmd = get_arguments(cmd,", "module implements the 'scan-build' command API. To run the static", "def scan_build(): \"\"\" Entry point for scan-build command. 
\"\"\" args", "if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user asked for", "for `analyze-cc` and `analyze-c++` compiler wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def", "try: yield name finally: if os.listdir(name): if output_format != 'sarif':", "command line parameters or file name extension. The decision also", "\"\"\" result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format(", "# filename is either absolute or relative to directory. Need", "and returns with it. If failure reports are requested, it", "If the user asks only for a single phase data", "args.keep_empty, args.output_format) as args.output: # Run against a build command.", "a parameter... language = opts.pop('language') compiler = opts.pop('compiler') # ...", "the captured output too with open(name + '.stderr.txt', 'w') as", "mangled_to_asts = defaultdict(set) for line in extdef_map_lines: mangled_name, ast_file =", "# There should be only one arch given (or the", "a single # collection run. if ctu_config.collect and ctu_config.analyze: #", "it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', # generate", "triarch = get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),", "'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i') def destination(): \"\"\"", "for current in pool.imap_unordered(run, generator): if current is not None:", "command = opts.pop('command') command = command if isinstance(command, list) else", "@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts):", "command to interpose compiler wrapper. 
\"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args))", "CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: # In", "need_analyzer(args): \"\"\" Check the intent of the build command. When", "need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build command and analyzer with", "coming from args.ctu_dir and extdef_map_cmd, # so we can leave", "try: os.makedirs(ast_dir) except OSError: # In case an other process", "where generated report files shall go 'output_format', # it's 'plist',", "source (AST generated from the source) which had their definition.", "language, None, if not specified 'compiler': compiler_language(command) # 'c' or", "are not # the same, those should not change the", "when coming from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2],", "run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir,", "os.close(handle) # Execute Clang again, but run the syntax check", "output too with open(name + '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output'])", "the static analyzer for line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join()", "values in state. It checks the required attributes in the", "(1st) and analyze (2nd) phase, we do an # all-in-one", "CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True,", "need to run the analyzer or generate report. 
To run", "the compilation for source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters", "json import logging import multiprocessing import tempfile import functools import", "if arg == '-arch': result['arch_list'].append(next(args)) # take language elif arg", "return prefix_with('-Xclang', result) def require(required): \"\"\" Decorator for checking the", "pre-processing step. # But that's the only pass we have", "given (or the same multiple # times). If there are", "consider everything else as compilation flag. else: result['flags'].append(arg) return result", "directory prefix the filename. \"\"\" if not os.path.isabs(filename): # filename", "macro at the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts)", "} # iterate on the compile options args = iter(command[1:])", "os.sep else path ast_path = os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name", "logging.debug(\"Generating AST using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\"", "analysis.\", exc_info=1) return None @require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',", "= run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError", "if received_list: # filter out disabled architectures and -arch switches", "= 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER =", "def extension(): \"\"\" Generate preprocessor file extension. \"\"\" mapping =", "if ctu_config.collect and ctu_config.analyze: # CTU strings are coming from", "first. 
if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user asked", "check the contract between the caller and the called method.)", "'error_output', 'exit_code']) def report_failure(opts): \"\"\" Create report when analyzer failed.", "for build command to interpose compiler wrapper. \"\"\" environment =", "file extension if language is None and compiler is not", "= get_ctu_config_from_args(args) # If we do a CTU collect (1st", "'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',", "\"\"\" mangled_to_asts = defaultdict(set) for line in extdef_map_lines: mangled_name, ast_file", "\"\"\" A group of command line arguments can mapped to", "line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format): \"\"\"", "prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags',", "msg = \"View result at %s/results-merged.sarif.\" keep = True else:", "at the end into a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def", "'--analyze'] + opts['direct_args'] \\ + opts['flags'] + [opts['file']] triarch =", "(str, str) tuples. \"\"\" mangled_to_asts = defaultdict(set) for line in", "args[0]: raise KeyError('{0} not passed to {1}'.format( key, function.__name__)) return", "check the compiler and capture the location for the build", ":type extdef_map_lines: Iterator of str. 
:returns: Mangled name - AST", "collect the needed parameters from environment, crash when missing parameters", "args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins:", "['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options", "current in pool.imap_unordered(run, generator): if current is not None: #", "when child terminated # by signal.) Everything else is 'Other", "relative path out of absolute path = path[1:] if path[0]", "# filter out disabled architectures and -arch switches filtered_list =", "= { '-c': 0, # compile option will be overwritten", "Single runs (collect or analyze) are launched from here. run_analyzer_parallel(args)", "ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs the analyzer against the given", "when compilation fails. or when it's not requested. if result", "msg = \"Report directory '%s' contains no report, but kept.\"", "the output directory. keep -- a boolean value to keep", "continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out nondebug", "= opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze", "delete the empty report directory. \"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp", "args.output_format) as args.output: # Run the analyzer against a compilation", "filename) for exclude_directory in args.excludes) consts = { 'clang': args.clang,", "Generate preprocessor file extension. \"\"\" mapping = {'objective-c++': '.mii', 'objective-c':", "language from command line parameters or file name extension. 
The", "'flags': ['-x', language] + opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def", "Clang terminated by a signal it's a 'Crash'. # (python", "= tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return opts['output_dir'] try:", "execute sequentially pool = multiprocessing.Pool(1 if args.verbose > 2 else", "extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): \"\"\"", "from the chosen phases and dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect,", "created it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as", "import contextlib import datetime import shutil import glob from collections", "(or the same multiple # times). If there are multiple", "return [elem for piece in pieces for elem in [constant,", "source. \"\"\" args = opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd]", "shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user asked for a collect", "ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result", "'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language can be given", "os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs", "influenced by the compiler invocation. 
\"\"\" accepted = frozenset({ 'c',", "path on windows as well path = os.path.splitdrive(path)[1] # Make", "classify_source(opts['file'], compiler == 'c') if language is None: logging.debug('skip analysis,", "'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language can", "return None else: logging.debug('analysis, on default arch') return continuation(opts) #", "for the build process. \"\"\" return len(args) and not re.search(r'configure|autogen',", "only affects the static analyzer run. # # Keys are", "0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1, '-init':", "it. pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o')", "opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir='", "definition (mangled name) and the originating source (the corresponding AST", "crash when missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),", "args.status_bugs else 0 def need_analyzer(args): \"\"\" Check the intent of", "error type: when Clang terminated by a signal it's a", "exclude(filename, directory): \"\"\" Return true when any excluded directory prefix", "\"\"\" accepted = frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output',", "= {'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures', False): opts.update(result) continuation(opts)", "LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier:", "# data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the", "keep, output_format): \"\"\" Responsible for the report directory. 
hint --", "exclude_directory, filename) for exclude_directory in args.excludes) consts = { 'clang':", "or delete the empty report directory. \"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'", "see. elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): pass #", "point for scan-build command. \"\"\" args = parse_args_for_scan_build() # will", "prefix_with(constant, pieces): \"\"\" From a sequence create another sequence where", "= ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def require(required): \"\"\"", "in triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir,", "necessary, when compiler wrappers are used. That's the moment when", "analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\"", "else: logging.debug('analysis, on default arch') return continuation(opts) # To have", "the run. 
If the user asks only for a single", "opts.get('output_failures', False): opts.update(result) continuation(opts) return result except ClangErrorException as ex:", "error + '_', dir=destination()) os.close(handle) # Execute Clang again, but", "= prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts)", "function.__name__)) return function(*args, **kwargs) return wrapper return decorator @require(['command', #", "run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate external definition map file", "report, but kept.\" else: msg = \"Removing directory '%s' because", "if keep: msg = \"Report directory '%s' contains no report,", "ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory']", "'.i') def destination(): \"\"\" Creates failures directory if not exits", "from here. run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set", "glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in files: with open(filename, 'r')", "# Run build command and analyzer with compiler wrappers. environment", "CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept import", "file pairs. :rtype: List of (str, str) tuples. \"\"\" mangled_to_asts", "'w') as out_file: for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\\n'", "= iter(command[1:]) for arg in args: # take arch flags", "a boolean value to keep or delete the empty report", "2 phases of CTU if needed. \"\"\" ctu_config = opts['ctu']", "parse_args_for_analyze_build from libscanbuild.intercept import capture from libscanbuild.report import document from", "same multiple # times). 
If there are multiple arch are", "os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: # In case an other", "the filename. \"\"\" if not os.path.isabs(filename): # filename is either", "parameters %s', parameters) current = run(parameters) # display error message", "'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output,", "if opts.pop('force_debug'): # lazy implementation just append an undefine macro", "from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \\ ClangErrorException from libscanbuild.shell", "= parse_args_for_scan_build() # will re-assign the report directory as new", "all compilation units are separately mapped into a temporary file", "with source files into an external definition map list with", "glob from collections import defaultdict from libscanbuild import command_entry_point, compiler_wrapper,", "ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu']) def", "delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd = opts['directory'] cmd", "global map keeping only unique names. We leave conflicting names", "language is None and compiler is not None: language =", "into an external definition map list with ast files. \"\"\"", "run analyzer through one of the given architectures. \"\"\" disabled", "= \"Report directory '%s' contains no report, but kept.\" else:", "with intercept module. exit_code = capture(args) # Run the analyzer", "it calls the continuation to generate it. \"\"\" def target():", "piece]] def get_ctu_config_from_args(args): \"\"\" CTU configuration is created from the", "\"\"\" return len(args) and not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces):", "other in chain. 
If the analysis is not possible the", "+ os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) + os.linesep)", "'%s' because it contains no report.\" logging.warning(msg, name) if not", "of (str, str) tuples. \"\"\" mangled_to_asts = defaultdict(set) for line", "a not in disabled] if filtered_list: # There should be", "\".ast\") extdef_ast_list.append(mangled_name + \" \" + ast_path) return extdef_ast_list @require(['clang',", "have before run the analyzer. current = filtered_list.pop() logging.debug('analysis, on", "'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against", "the chain. The passed parameter is a python dictionary. Each", "get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] + [opts['file'], '-o', target()],", "# -*- coding: utf-8 -*- # Part of the LLVM", "+ error + '_', dir=destination()) os.close(handle) # Execute Clang again,", "opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path", "when verbose output requested execute sequentially pool = multiprocessing.Pool(1 if", "if os.listdir(name): if output_format != 'sarif': # 'scan-view' currently does", "1, 0, 2, 0, 3] \"\"\" return [elem for piece", "# Recover namedtuple from json when coming from analyze-cc or", "%s' to examine bug reports.\" else: msg = \"View result", "'arch_list': [], # list of architecture flags 'language': None, #", "flags (filters some and add others) and take out language", "phase data is # left so multiple analyze runs can", "configure step might be necessary, when compiler wrappers are used.", "%s', language) opts.update({'language': language, 'flags': ['-x', language] + opts['flags']}) return", "not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory 
created:", "and not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): \"\"\" From a", "None, if not specified 'compiler': compiler_language(command) # 'c' or 'c++'", "def get_ctu_config_from_args(args): \"\"\" CTU configuration is created from the chosen", "definition map list with ast files. \"\"\" extdef_ast_list = []", "{ 'flags': [], # the filtered compiler flags 'arch_list': [],", "run the analyzer or generate report. To run `scan-build` against", "checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang',", "CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\" Entry point for", "that's the only pass we have before run the analyzer.", "except subprocess.CalledProcessError as ex: result = {'error_output': ex.output, 'exit_code': ex.returncode}", "ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if", "That's the moment when build setup check the compiler and", "non debug macros 'output_dir', # where generated report files shall", "file name for reports. 
\"\"\" if opts['output_format'] in { 'plist',", "= { 'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures,", "coming from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3])", "we should suppress ones # that we don't want to", "\"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in files:", "arch_check(opts, continuation=language_check): \"\"\" Do run analyzer through one of the", "not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify error type: when", "[1,2,3]) creates [0, 1, 0, 2, 0, 3] \"\"\" return", "wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build", "create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True)", "ast files. \"\"\" extdef_ast_list = [] for extdef_src_txt in extdef_src_list:", "extension if language is None and compiler is not None:", "\"\"\" Governs multiple runs in CTU mode or runs once", "\"\"\" Prepare compiler flags (filters some and add others) and", "parse_args_for_scan_build() # will re-assign the report directory as new output", "setup check the compiler and capture the location for the", "and ctu_config.analyze: # CTU strings are coming from args.ctu_dir and", "+ os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the captured output too", "generate it. 
\"\"\" def target(): \"\"\" Creates output file name", "to check the contract between the caller and the called", "# clang executable name (and path) 'direct_args', # arguments from", "'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return", "tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif opts['output_format'] == 'sarif':", "dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs (collect", "os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines", "language) opts.update({'language': language, 'flags': ['-x', language] + opts['flags']}) return continuation(opts)", "the build, -- Analyze: run the analyzer against the captured", "return and break the chain. The passed parameter is a", "should be merged at the end into a global map", "= opts.pop('arch_list') if received_list: # filter out disabled architectures and", "'scan-view %s' to examine bug reports.\" else: msg = \"View", "can be given as a parameter... 
language = opts.pop('language') compiler", "pieces): \"\"\" From a sequence create another sequence where every", "contextlib import datetime import shutil import glob from collections import", "environment = setup_environment(args) exit_code = run_build(args.build, env=environment) # Cover report", "handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the captured", "'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '),", "arg == '-arch': result['arch_list'].append(next(args)) # take language elif arg ==", "dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if", "'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug,", "opts['direct_args'] + opts['flags'] + [opts['file'], '-o', target()], cwd) output =", "@command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry point for `analyze-cc` and `analyze-c++`", "is not None: for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def", "ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\", ast_command)", "static analyzer against the compilation for source in compilation.files: parameters.update({'file':", "run (or not) static analyzer against a single entry of", "directory as new output with report_directory(args.output, args.keep_empty, args.output_format) as args.output:", "analyze_compiler_wrapper(): \"\"\" Entry point for `analyze-cc` and `analyze-c++` compiler 
wrappers.", "[opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \\ [opts['file'], '-o', name]", "CTU strings are coming from args.ctu_dir and extdef_map_cmd, # so", "some more execution context also saved into '.info.txt' file. \"\"\"", "(This is blindly following the # Perl implementation.) (handle, name)", "environment for build command to interpose compiler wrapper. \"\"\" environment", "'configure' needs to capture the CC/CXX values # for the", "os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch,", "and the source (AST generated from the source) which had", "# wrappers, because 'configure' needs to capture the CC/CXX values", "analyzer when compilation fails. or when it's not requested. if", "error message from the static analyzer if current is not", "# ignore some flags elif arg in IGNORED_FLAGS: count =", "get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation database') with open(args.cdb, 'r')", "leave it empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args)", "open(name + '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory',", "single # collection run. if ctu_config.collect and ctu_config.analyze: # CTU", "Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. \"\"\" return", "is a python dictionary. Each method first check that the", "coding: utf-8 -*- # Part of the LLVM Project, under", "License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license", "'exit_code': 0} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result def", "The passed parameter is a python dictionary. 
Each method first", "os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split('", "== os.sep else path ast_path = os.path.join(\"ast\", path + \".ast\")", "ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\", ast_command) run_command(ast_command,", "by generating all data needed by CTU analysis. \"\"\" def", "compilation command. \"\"\" args = opts['direct_args'] + opts['flags'] ast_joined_path =", "current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format): \"\"\" Responsible for", "analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='',", "def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of individual external definition maps", "'c' or 'c++' } # iterate on the compile options", "pool.imap_unordered(run, generator): if current is not None: # display error", ":param extdef_map_lines: Contains the id of a definition (mangled name)", "opts.update({'language': language, 'flags': ['-x', language] + opts['flags']}) return continuation(opts) @require(['arch_list',", "can use the same data gathered by a single #", "in disabled] if filtered_list: # There should be only one", "known') return None elif language not in accepted: logging.debug('skip analysis,", "name) try: yield name finally: if os.listdir(name): if output_format !=", "command. \"\"\" args = parse_args_for_analyze_build() # will re-assign the report", "point for `analyze-cc` and `analyze-c++` compiler wrappers. 
\"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl)", "elements are the prefix. eg.: prefix_with(0, [1,2,3]) creates [0, 1,", "args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers =", "args.output: # Run the analyzer against a compilation db. govern_analyzer_runs(args)", "It's like an 'assert' to check the contract between the", "= opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags']", "# ctu control options def run(opts): \"\"\" Entry point to", "keep or delete the empty report directory. \"\"\" stamp_format =", "of the given architectures. \"\"\" disabled = frozenset({'ppc', 'ppc64'}) received_list", "AST file) name. :type extdef_map_lines: Iterator of str. :returns: Mangled", "opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \\", "cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate external definition map file for", "a sequence create another sequence where every second element is", "is None: logging.debug('skip analysis, language not known') return None elif", "# Set exit status as it was requested. 
return number_of_bugs", "generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary", "result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external definition map list", "in extdef_src_list: mangled_name, path = extdef_src_txt.split(\" \", 1) # Normalize", "cwd = opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags']", "iter(command[1:]) for arg in args: # take arch flags into", "ignore_errors=True) # If the user asked for a collect (1st)", "import parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept import capture from libscanbuild.report", "# left so multiple analyze runs can use the same", "'other_error' # Create preprocessor output file name. (This is blindly", "in received_list if a not in disabled] if filtered_list: #", "or file name extension. The decision also influenced by the", "flags elif arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _", "tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return opts['output_dir'] try: cwd", "= os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)", "args.excludes) consts = { 'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format,", "environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG':", "%s\\n' % (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path", "+ ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])", "a compilation db. 
govern_analyzer_runs(args) # Cover report generation and bug", "steps: -- Intercept: capture the compilation command during the build,", "extdef_src_txt in extdef_src_list: mangled_name, path = extdef_src_txt.split(\" \", 1) #", "1, # will set up own output file # flags", "to see. elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): pass", "type: when Clang terminated by a signal it's a 'Crash'.", "+ ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def language_check(opts,", "Entry point for analyze-build command. \"\"\" args = parse_args_for_analyze_build() #", "'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', # generate crash reports", "handle.close() # write the captured output too with open(name +", "if args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if", "report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run the analyzer against", "execution context also saved into '.info.txt' file. \"\"\" def extension():", "import get_version, get_arguments, get_triple_arch, \\ ClangErrorException from libscanbuild.shell import decode", "the given architectures. \"\"\" disabled = frozenset({'ppc', 'ppc64'}) received_list =", "or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts)", "interpose compiler wrapper. \"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC':", "are requested, it calls the continuation to generate it. \"\"\"", "args = opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--')", "if ctu_config.analyze: cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] +", "from the source) which had their definition. 
These files should", "run # is not required. But we need to set", "COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR':", "triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir =", "pool.join() def govern_analyzer_runs(args): \"\"\" Governs multiple runs in CTU mode", "ASTs for the current compilation command. \"\"\" args = opts['direct_args']", "0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version':", "os import os.path import json import logging import multiprocessing import", "the intent of the build command. When static analyzer run", "compilation = split_command(execution.cmd) if compilation is None: return # collect", "just return and break the chain. The passed parameter is", "is not None: # display error message from the static", "a compilation? compilation = split_command(execution.cmd) if compilation is None: return", "run the syntax check only. cwd = opts['directory'] cmd =", "the analyzer. This method generates those. \"\"\" result = []", "if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers')", "'file', 'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess source by generating all", "'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out nondebug macros when", "# and consider everything else as compilation flag. 
else: result['flags'].append(arg)", "open(args.cdb, 'r') as handle: generator = (dict(cmd, **consts) for cmd", "True else: if keep: msg = \"Report directory '%s' contains", "= opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:]", "def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only one phase of 2", "Exception: logging.error(\"Problem occurred during analysis.\", exc_info=1) return None @require(['clang', 'directory',", "arch flags into a separate basket if arg == '-arch':", "return None else: logging.debug('analysis, language: %s', language) opts.update({'language': language, 'flags':", "remove all previous collection # data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir,", "def setup_environment(args): \"\"\" Set up environment for build command to", "0, 2, 0, 3] \"\"\" return [elem for piece in", "we need to set up everything for the # wrappers,", "command. \"\"\" args = parse_args_for_scan_build() # will re-assign the report", "randomly. The compiler output also captured into '.stderr.txt' file. And", "= os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir)", "line in in_file: yield line def write_global_map(arch, mangled_ast_pairs): \"\"\" Write", "append an undefine macro at the end opts.update({'flags': opts['flags'] +", "not re.match(r'^-Wno-.+', arg): pass # and consider everything else as", "outputs. \"\"\" import re import os import os.path import json", "mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'],", "['-arch', current] + opts['flags']}) return continuation(opts) else: logging.debug('skip analysis, found", "is done by the 'require' decorator. It's like an 'assert'", "CTU_TEMP_DEFMAP_FOLDER. 
These definition maps contain the mangled names and the", "# Classify error type: when Clang terminated by a signal", "+ \".ast\") extdef_ast_list.append(mangled_name + \" \" + ast_path) return extdef_ast_list", "'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name", "args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer", "'c++': '.ii'} return mapping.get(opts['language'], '.i') def destination(): \"\"\" Creates failures", "\"\"\" Check the intent of the build command. When static", "in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names and", "result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers:", "'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess source by", "report when analyzer failed. The major report is the preprocessor", "well path = os.path.splitdrive(path)[1] # Make relative path out of", "static analyzer option will be overwritten '-o': 1, # will", "where we deliberately remove collection data before and # also", "the perl implementation. 
'-g': 0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list':", "None: # display error message from the static analyzer for", "+ os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options =", "\"\"\" Merge individual external definition maps into a global one.", "from libscanbuild import command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build, run_command, CtuConfig", "for line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): \"\"\"", "for a collect (1st) and analyze (2nd) phase, we do", "'-current_version': 1, '-compatibility_version': 1, '-init': 1, '-e': 1, '-seg1addr': 1,", "= [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition map using", "intercept module. exit_code = capture(args) # Run the analyzer against", "with it. If failure reports are requested, it calls the", "os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name + \" \" + ast_path)", "file. \"\"\" def extension(): \"\"\" Generate preprocessor file extension. \"\"\"", "continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\" Find", "[opts['clang'], '--analyze'] + opts['direct_args'] \\ + opts['flags'] + [opts['file']] triarch", "['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME", "when compiler wrappers are used. That's the moment when build", "# Run the analyzer against the captured commands. if need_analyzer(args.build):", "again, but run the syntax check only. 
cwd = opts['directory']", "as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd = opts['directory'] cmd =", "json.load(handle) if not exclude( cmd['file'], cmd['directory'])) # when verbose output", "\"\"\" ctu_config = opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect", "{'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts) return", "parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp,", "merged at the end into a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\"", "compile options args = iter(command[1:]) for arg in args: #", "1, '-e': 1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder':", "args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if", "**kwargs) return wrapper return decorator @require(['command', # entry from compilation", "result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config:", "str) tuples. \"\"\" mangled_to_asts = defaultdict(set) for line in extdef_map_lines:", "\"\"\" try: command = opts.pop('command') command = command if isinstance(command,", "the source) which had their definition. These files should be", "for exclude_directory in args.excludes) consts = { 'clang': args.clang, 'output_dir':", "\"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return", "generated report files shall go 'output_format', # it's 'plist', 'html',", "the only pass we have before run the analyzer. 
current", "name, value number of options to skip IGNORED_FLAGS = {", "'.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir):", "result at %s/results-merged.sarif.\" keep = True else: if keep: msg", "-- Intercept: capture the compilation command during the build, --", "yield name finally: if os.listdir(name): if output_format != 'sarif': #", "wrapper functionality. \"\"\" # don't run analyzer when compilation fails.", "mapped to command line arguments of the analyzer. This method", "so multiple analyze runs can use the same data gathered", "filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out nondebug macros when requested. \"\"\"", "but we should suppress ones # that we don't want", "opts['output_format'] == 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle)", "exclude( cmd['file'], cmd['directory'])) # when verbose output requested execute sequentially", "handle: generator = (dict(cmd, **consts) for cmd in json.load(handle) if", "collection run. if ctu_config.collect and ctu_config.analyze: # CTU strings are", "= CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: #", "analysis, found not supported arch') return None else: logging.debug('analysis, on", "**consts) for cmd in json.load(handle) if not exclude( cmd['file'], cmd['directory']))", "windows as well path = os.path.splitdrive(path)[1] # Make relative path", "are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER. These", "are given and are not # the same, those should", "found not supported arch') return None else: logging.debug('analysis, on default", "'-c': 0, # compile option will be overwritten '-fsyntax-only': 0,", "for _ in range(count): next(args) # we don't care about", "args.output_format) as args.output: # Run against a build command. 
there", "opts['direct_args'] \\ + opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd, cwd)", "from the static analyzer for line in current['error_output']: logging.info(line.rstrip()) pool.close()", "it was requested. return number_of_bugs if args.status_bugs else exit_code @command_entry_point", "Check the intent of the build command. When static analyzer", "0} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list):", "Prepare compiler flags (filters some and add others) and take", "are absolute paths. filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' +", "and dir. \"\"\" ctu_config = json.loads(ctu_conf_json) # Recover namedtuple from", "\"\"\" Do run analyzer through one of the given architectures.", "line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name, ast_files", "mapping.get(opts['language'], '.i') def destination(): \"\"\" Creates failures directory if not", "db. govern_analyzer_runs(args) # Cover report generation and bug counting. number_of_bugs", "support sarif format. msg = \"Run 'scan-view %s' to examine", "if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if", "and the odd elements are the prefix. eg.: prefix_with(0, [1,2,3])", "mode or runs once in normal mode. \"\"\" ctu_config =", "consts = { 'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures':", "second element is from the original sequence and the odd", "and creates a global map keeping only unique names. 
We", "msg = \"Run 'scan-view %s' to examine bug reports.\" else:", "elif re.match(r'^[^-].+', arg) and classify_source(arg): pass # ignore some flags", "cwd = opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] +", "individual external definition maps and creates a global map keeping", "environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build)", "[0, 1, 0, 2, 0, 3] \"\"\" return [elem for", "pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path)", "reports are requested, it calls the continuation to generate it.", "elif language not in accepted: logging.debug('skip analysis, language not supported')", "are multiple arch are given and are not # the", "dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='')", "in_file: for line in in_file: yield line def write_global_map(arch, mangled_ast_pairs):", "of the analyzer. This method generates those. \"\"\" result =", "'.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() #", "number of options to skip IGNORED_FLAGS = { '-c': 0,", "below are inherited from the perl implementation. '-g': 0, '-save-temps':", "def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external definition map list with", "a cover report from the analyzer outputs. 
\"\"\" import re", "\\ [opts['file'], '-o', name] try: cmd = get_arguments(cmd, cwd) run_command(cmd,", "triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch,", "1, '--serialize-diagnostics': 1 } def classify_parameters(command): \"\"\" Prepare compiler flags", "external definition maps into a global one. As the collect", "stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if", "a collect (1st) and analyze (2nd) phase, we do an", "suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif opts['output_format'] == 'sarif': (handle,", "'file', 'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts): \"\"\" Create report", "And some more execution context also saved into '.info.txt' file.", "== 'c') if language is None: logging.debug('skip analysis, language not", "single entry of the compilation database. This complex task is", "the passed state and stop when any of those is", "analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes", "for future processing. \"\"\" result = { 'flags': [], #", "used. That's the moment when build setup check the compiler", "go 'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif'", "(python subprocess Popen.returncode is negative when child terminated # by", "0 def need_analyzer(args): \"\"\" Check the intent of the build", "try: os.makedirs(extern_defs_map_folder) except OSError: # In case an other process", "not in disabled] if filtered_list: # There should be only", "args = iter(command[1:]) for arg in args: # take arch", "[] for extdef_src_txt in extdef_src_list: mangled_name, path = extdef_src_txt.split(\" \",", "command API. 
To run the static analyzer against a build", "is created from the chosen phases and dir. \"\"\" return", "+ [opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def", "directory of the output directory. keep -- a boolean value", "files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs the analyzer against", "def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out nondebug macros when requested.", "name extension. The decision also influenced by the compiler invocation.", "if args.status_bugs else exit_code @command_entry_point def analyze_build(): \"\"\" Entry point", "classify_parameters(command): \"\"\" Prepare compiler flags (filters some and add others)", "@require(['command', # entry from compilation database 'directory', # entry from", "current = filtered_list.pop() logging.debug('analysis, on arch: %s', current) opts.update({'flags': ['-arch',", ">= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers", "\"\"\" Entry point for scan-build command. \"\"\" args = parse_args_for_scan_build()", "assert ctu_config.collect != ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze:", "static analyzer if current is not None: for line in", "Preprocess source by generating all data needed by CTU analysis.", "+ opts['direct_args'] + opts['flags'] \\ + [opts['file']] triple_arch = get_triple_arch(cmd,", "and dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if", "ctu_config.analyze: # CTU strings are coming from args.ctu_dir and extdef_map_cmd,", "created from the chosen phases and dir. 
\"\"\" ctu_config =", "'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry", "[] for mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files) == 1:", "Capture the output of the analysis and returns with it.", "ctu control options def run(opts): \"\"\" Entry point to run", "definition map list with source files into an external definition", "Do run analyzer through one of the given architectures. \"\"\"", "arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _ in range(count):", "args: # take arch flags into a separate basket if", "To run `scan-build` against the configure step might be necessary,", "of command line arguments can mapped to command line arguments", "required values in state. It checks the required attributes in", "on multiple threads, all compilation units are separately mapped into", "which are calling each other in chain. If the analysis", "nondebug macros when requested. \"\"\" if opts.pop('force_debug'): # lazy implementation", "some and add others) and take out language (-x) and", "# where generated report files shall go 'output_format', # it's", "handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep) handle.write('", "def language_check(opts, continuation=filter_debug_flags): \"\"\" Find out the language from command", "analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is created", "not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): \"\"\" From a sequence", "report directory. hint -- could specify the parent directory of", "input files in a determined order. 
\"\"\" files = glob.glob(os.path.join(extdefmap_dir,", "build setup check the compiler and capture the location for", "result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress')", "language] + opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check):", "or analyze) are launched from here. run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir)", "= line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name,", "to command line arguments of the analyzer. This method generates", "complex task is decomposed into smaller methods which are calling", "os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory, filename) for exclude_directory in", "except ClangErrorException as ex: result = {'error_output': ex.error, 'exit_code': 0}", "captured commands, -- Report: create a cover report from the", "result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load',", "} # call static analyzer against the compilation for source", "the current compilation command. 
\"\"\" args = opts['direct_args'] + opts['flags']", "else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def analyze_compiler_wrapper():", "merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set up environment for build command", "path = path[1:] if path[0] == os.sep else path ast_path", "\"\"\" if not os.path.isabs(filename): # filename is either absolute or", "prefix='clang_' + error + '_', dir=destination()) os.close(handle) # Execute Clang", "\\ + opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options", "+ exclude_directory, filename) for exclude_directory in args.excludes) consts = {", "+ '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not", "a python dictionary. Each method first check that the needed", "Popen.returncode is negative when child terminated # by signal.) Everything", "CC/CXX values # for the Makefile. if args.intercept_first: # Run", "@command_entry_point def analyze_build(): \"\"\" Entry point for analyze-build command. \"\"\"", "opts.pop('command') command = command if isinstance(command, list) else decode(command) logging.debug(\"Run", "# list of architecture flags 'language': None, # compilation language,", "wrappers. environment = setup_environment(args) exit_code = run_build(args.build, env=environment) # Cover", "= \"View result at %s/results-merged.sarif.\" keep = True else: if", "the static analyzer run. 
# # Keys are the option", "= get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\"", "'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation database')", "generator = (dict(cmd, **consts) for cmd in json.load(handle) if not", "runs can use the same data gathered by a single", "accepted = frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output'", "could specify the parent directory of the output directory. keep", "ctu_config.collect != ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd", "= setup_environment(args) exit_code = run_build(args.build, env=environment) # Cover report generation", "and executes it. Capture the output of the analysis and", "# that we don't want to see. elif re.match(r'^-W.+', arg)", "','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers])", "the collect phase runs parallel on multiple threads, all compilation", "else: if keep: msg = \"Report directory '%s' contains no", "was requested. return number_of_bugs if args.status_bugs else exit_code @command_entry_point def", "= [] for extdef_src_txt in extdef_src_list: mangled_name, path = extdef_src_txt.split(\"", "run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException: pass # write", "from the analyzer outputs. \"\"\" import re import os import", "moment when build setup check the compiler and capture the", "when it's not requested. 
if result or not os.getenv('ANALYZE_BUILD_CLANG'): return", "add others) and take out language (-x) and architecture (-arch)", "compiler is not None: language = classify_source(opts['file'], compiler == 'c')", "directory. Need to turn # it to absolute since 'args.excludes'", "'file', # entry from compilation database 'clang', # clang executable", "case an other process already created it. pass ast_command =", "Runs the analyzer against the given compilation database. \"\"\" def", "result) def require(required): \"\"\" Decorator for checking the required values", "compiler options shall be # omitted. The compiler flag filtering", "govern_analyzer_runs(args) # Cover report generation and bug counting. number_of_bugs =", "calling each other in chain. If the analysis is not", "run_build, run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build from", "OSError: # In case an other process already created it.", "analyzer run. # # Keys are the option name, value", "the syntax check only. cwd = opts['directory'] cmd = [opts['clang'],", "'.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) })", "ast_file in mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name, ast_file)) triple_arches =", "map_extdefs(triple_arch): \"\"\" Generate external definition map file for the current", "'sarif': # 'scan-view' currently does not support sarif format. msg", "definition map using '%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list", "static analyzer run against project configure step, it should be", "user asks only for a single phase data is #", "opts.pop('language') compiler = opts.pop('compiler') # ... 
or find out from", "accepted: logging.debug('skip analysis, language not supported') return None else: logging.debug('analysis,", "# the filtered compiler flags 'arch_list': [], # list of", "opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] +", "else None) for current in pool.imap_unordered(run, generator): if current is", "analyzer for line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args):", "extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir,", "') + os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) +", "args[0]) def prefix_with(constant, pieces): \"\"\" From a sequence create another", "static analyzer certain compiler options shall be # omitted. The", "Part of the LLVM Project, under the Apache License v2.0", "prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu):", "Find out the language from command line parameters or file", "datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name =", "args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker:", "failures_dir # Classify error type: when Clang terminated by a", "parameters or file name extension. 
The decision also influenced by", "directory): \"\"\" Return true when any excluded directory prefix the", "list) else decode(command) logging.debug(\"Run analyzer against '%s'\", command) opts.update(classify_parameters(command)) return", "os.path.splitdrive(path)[1] # Make relative path out of absolute path =", "ClangErrorException from libscanbuild.shell import decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']", "def classify_parameters(command): \"\"\" Prepare compiler flags (filters some and add", "files.sort() for filename in files: with open(filename, 'r') as in_file:", "The compiler output also captured into '.stderr.txt' file. And some", "# Cover report generation and bug counting. number_of_bugs = document(args)", "flags 'language': None, # compilation language, None, if not specified", "into '.info.txt' file. \"\"\" def extension(): \"\"\" Generate preprocessor file", "\"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint)", "capture the location for the build process. \"\"\" return len(args)", "return None @require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code'])", "from libscanbuild.report import document from libscanbuild.compilation import split_command, classify_source, \\", "get_version, get_arguments, get_triple_arch, \\ ClangErrorException from libscanbuild.shell import decode __all__", "'-o', name] try: cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except", "dir=opts['output_dir']) os.close(handle) return name return opts['output_dir'] try: cwd = opts['directory']", "= { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),", "'exit_code']) def report_failure(opts): \"\"\" Create report when analyzer failed. 
The", "arguments from command line 'force_debug', # kill non debug macros", "disabled] if filtered_list: # There should be only one arch", "for arg in args: # take arch flags into a", "empty report directory. \"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format)", "sequence and the odd elements are the prefix. eg.: prefix_with(0,", "single phase data is # left so multiple analyze runs", "information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\" This module implements", "which looks source file are not flags elif re.match(r'^[^-].+', arg)", "# Run against a build command. there are cases, when", "there are cases, when analyzer run # is not required.", "'-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1", "(-arch) flags for future processing. \"\"\" result = { 'flags':", "return len(args) and not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): \"\"\"", "\"\"\" Write (mangled name, ast file) pairs into final file.", "args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats')", "against the captured commands, -- Report: create a cover report", "'').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] +", "except subprocess.CalledProcessError: pass except ClangErrorException: pass # write general information", "smaller methods which are calling each other in chain. If", "result = {'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result)", "value to keep or delete the empty report directory. 
\"\"\"", "+ '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags',", "else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures", "compiler invocation. \"\"\" accepted = frozenset({ 'c', 'c++', 'objective-c', 'objective-c++',", "\"\"\" From a sequence create another sequence where every second", "out language (-x) and architecture (-arch) flags for future processing.", "analyzer or generate report. To run `scan-build` against the configure", "Decorator for checking the required values in state. It checks", "python dictionary. Each method first check that the needed parameters", "if compilation is None: return # collect the needed parameters", "generate_ast(triple_arch): \"\"\" Generates ASTs for the current compilation command. \"\"\"", "once in normal mode. \"\"\" ctu_config = get_ctu_config_from_args(args) # If", "run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER)", "configure step, it should be silent and no need to", "for analyze-build command. \"\"\" args = parse_args_for_analyze_build() # will re-assign", "msg = \"Removing directory '%s' because it contains no report.\"", "if not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory", "in_file: yield line def write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled name,", "gathered by a single # collection run. if ctu_config.collect and", "files. 
\"\"\" extdef_ast_list = [] for extdef_src_txt in extdef_src_list: mangled_name,", "continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual external definition", "asks only for a single phase data is # left", "'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME =", "\"\"\" # don't run analyzer when compilation fails. or when", "an other process already created it. pass if extdef_ast_list: with", "future processing. \"\"\" result = { 'flags': [], # the", "'-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1 } def classify_parameters(command): \"\"\"", "govern_analyzer_runs(args): \"\"\" Governs multiple runs in CTU mode or runs", "-*- # Part of the LLVM Project, under the Apache", "other process already created it. pass ast_command = [opts['clang'], '-emit-ast']", "threads, all compilation units are separately mapped into a temporary", "definition maps contain the mangled names and the source (AST", "syntax check only. cwd = opts['directory'] cmd = [opts['clang'], '-fsyntax-only',", "None else: logging.debug('analysis, on default arch') return continuation(opts) # To", "not supported') return None else: logging.debug('analysis, language: %s', language) opts.update({'language':", "point for analyze-build command. \"\"\" args = parse_args_for_analyze_build() # will", "'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,", "if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: # In case", "it empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir)", "with open(filename, 'r') as in_file: for line in in_file: yield", "# In case an other process already created it. 
pass", "args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def", "+ \"\\n\") cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] +", "current] + opts['flags']}) return continuation(opts) else: logging.debug('skip analysis, found not", "defaultdict from libscanbuild import command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build, run_command,", "directory as new output with report_directory( args.output, args.keep_empty, args.output_format) as", "os.close(handle) return name return opts['output_dir'] try: cwd = opts['directory'] cmd", "run_analyzer_parallel(args): \"\"\" Runs the analyzer against the given compilation database.", "1 } def classify_parameters(command): \"\"\" Prepare compiler flags (filters some", "for the current compilation command. \"\"\" args = opts['direct_args'] +", "of the compilation database. This complex task is decomposed into", "parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures':", "(handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_', dir=destination())", "if not os.path.isabs(filename): # filename is either absolute or relative", "remove collection data before and # also after the run.", "process. \"\"\" return len(args) and not re.search(r'configure|autogen', args[0]) def prefix_with(constant,", "to turn # it to absolute since 'args.excludes' are absolute", "architectures and -arch switches filtered_list = [a for a in", "more execution context also saved into '.info.txt' file. \"\"\" def", "compilation for source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s',", "logging.debug('run analyzer against compilation database') with open(args.cdb, 'r') as handle:", "a signal it's a 'Crash'. 
# (python subprocess Popen.returncode is", "return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): \"\"\" Do run", "the originating source (the corresponding AST file) name. :type extdef_map_lines:", "should not change the pre-processing step. # But that's the", "mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external", "in required: if key not in args[0]: raise KeyError('{0} not", "is negative when child terminated # by signal.) Everything else", "from source file extension if language is None and compiler", "os.listdir(name): if output_format != 'sarif': # 'scan-view' currently does not", "os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer',", "return mapping.get(opts['language'], '.i') def destination(): \"\"\" Creates failures directory if", "+ opts['flags'] \\ + [opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch)", "in chain. If the analysis is not possible the given", "collect (1st phase) we remove all previous collection # data", "else: logging.debug('analysis, language: %s', language) opts.update({'language': language, 'flags': ['-x', language]", "= [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \\ + [opts['file']]", "# take arch flags into a separate basket if arg", "def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all lines of input files", "step. # But that's the only pass we have before", "architecture flags 'language': None, # compilation language, None, if not", "launched from here. 
run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\"", "parameters which looks source file are not flags elif re.match(r'^[^-].+',", "= os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file: for", "mangled names and the source (AST generated from the source)", "'assert' to check the contract between the caller and the", "the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler',", "over all lines of input files in a determined order.", "\\ wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \\", "generated randomly. The compiler output also captured into '.stderr.txt' file.", "Analyze: run the analyzer against the captured commands, -- Report:", "the caller and the called method.) \"\"\" try: command =", "preprocessor output file name. (This is blindly following the #", "of the output directory. keep -- a boolean value to", "args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if", "using '%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)", "preprocessor output. The output filename generated randomly. The compiler output", "kept.\" else: msg = \"Removing directory '%s' because it contains", "is None: return # collect the needed parameters from environment,", "received_list: # filter out disabled architectures and -arch switches filtered_list", "specify the parent directory of the output directory. 
keep --", "re-assign the report directory as new output with report_directory( args.output,", "args = opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',", "arg) and classify_source(arg): pass # ignore some flags elif arg", "deliberately remove collection data before and # also after the", "against '%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error(\"Problem occurred", "= {'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts)", "extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of individual external definition", "(This is done by the 'require' decorator. It's like an", "= run(parameters) # display error message from the static analyzer", "all-in-one run where we deliberately remove collection data before and", "+ opts['direct_args'] \\ + opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd,", "dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args,", "for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\" This", "'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): \"\"\" It assembles", "external definition map using '%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])", "'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU", "# entry from compilation database 'clang', # clang executable name", "compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer against the", "by the compiler invocation. 
\"\"\" accepted = frozenset({ 'c', 'c++',", "to generate it. \"\"\" def target(): \"\"\" Creates output file", "for the report directory. hint -- could specify the parent", "don't run analyzer when compilation fails. or when it's not", "environment, crash when missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir':", "'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU':", "sequence create another sequence where every second element is from", "check that the needed parameters received. (This is done by", "get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute", "'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts): \"\"\" Create report when", "output. The output filename generated randomly. The compiler output also", "Error'. error = 'crash' if opts['exit_code'] < 0 else 'other_error'", "'objective-c-cpp-output' }) # language can be given as a parameter...", "return number_of_bugs if args.status_bugs else 0 def need_analyzer(args): \"\"\" Check", "CTU mode or runs once in normal mode. \"\"\" ctu_config", "compilation is None: return # collect the needed parameters from", "just append an undefine macro at the end opts.update({'flags': opts['flags']", "one phase of 2 phases of CTU if needed. 
\"\"\"", "temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs the analyzer", "the # wrappers, because 'configure' needs to capture the CC/CXX", "opts['direct_args'] + opts['flags'] \\ + [opts['file']] triple_arch = get_triple_arch(cmd, cwd)", "'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': '", "if not exclude( cmd['file'], cmd['directory'])) # when verbose output requested", "merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external definition maps into a global", "if args.verbose > 2 else None) for current in pool.imap_unordered(run,", "','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def require(required): \"\"\" Decorator", "}) # language can be given as a parameter... language", "# parameters which looks source file are not flags elif", "and the called method.) \"\"\" try: command = opts.pop('command') command", "don't care about extra warnings, but we should suppress ones", "another sequence where every second element is from the original", "in args.excludes) consts = { 'clang': args.clang, 'output_dir': args.output, 'output_format':", "'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures',", "it's not requested. if result or not os.getenv('ANALYZE_BUILD_CLANG'): return #", "result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format:", "+ [opts['file'], '-o', target()], cwd) output = run_command(cmd, cwd=cwd) return", "output_format): \"\"\" Responsible for the report directory. 
hint -- could", "pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list)", "'--analyze'] + opts['direct_args'] + opts['flags'] \\ + [opts['file']] triple_arch =", "as ex: result = {'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures',", "\" \" + ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args',", "as args.output: # Run the analyzer against a compilation db.", "by signal.) Everything else is 'Other Error'. error = 'crash'", "args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS':", "+ opts['direct_args'] + opts['flags'] + [opts['file'], '-o', target()], cwd) output", "or generate report. To run `scan-build` against the configure step", "(AST generated from the source) which had their definition. These", "the output of the analysis and returns with it. If", "it. If failure reports are requested, it calls the continuation", "language not in accepted: logging.debug('skip analysis, language not supported') return", "direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def", "cmd['directory'])) # when verbose output requested execute sequentially pool =", "= multiprocessing.Pool(1 if args.verbose > 2 else None) for current", "collect phase runs parallel on multiple threads, all compilation units", "def need_analyzer(args): \"\"\" Check the intent of the build command.", "ignore_errors=True) else: # Single runs (collect or analyze) are launched", "return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer compiler wrapper", "the static analyzer against a build is done in multiple", "with ast files. 
\"\"\" extdef_ast_list = [] for extdef_src_txt in", "not in args[0]: raise KeyError('{0} not passed to {1}'.format( key,", "when analyzer run # is not required. But we need", "subprocess.CalledProcessError as ex: result = {'error_output': ex.output, 'exit_code': ex.returncode} if", "the given method just return and break the chain. The", "except OSError: # In case an other process already created", "disabled = frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if received_list: #", "the source (AST generated from the source) which had their", "generate report. To run `scan-build` against the configure step might", "1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual", "it was requested. return number_of_bugs if args.status_bugs else 0 def", "take arch flags into a separate basket if arg ==", "analyzer against compilation database') with open(args.cdb, 'r') as handle: generator", "do an # all-in-one run where we deliberately remove collection", "cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options =", "needs to capture the CC/CXX values # for the Makefile.", "result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model))", "= defaultdict(set) for line in extdef_map_lines: mangled_name, ast_file = line.strip().split('", "compiler == 'c') if language is None: logging.debug('skip analysis, language", "output requested execute sequentially pool = multiprocessing.Pool(1 if args.verbose >", "extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition map", "out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") 
cwd = opts['directory'] cmd = [opts['clang'], '--analyze']", "This module implements the 'scan-build' command API. To run the", "in args: # take arch flags into a separate basket", "sarif format. msg = \"Run 'scan-view %s' to examine bug", "capture the CC/CXX values # for the Makefile. if args.intercept_first:", "a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over", "analysis. \"\"\" def generate_ast(triple_arch): \"\"\" Generates ASTs for the current", "analyzer against the compilation for source in compilation.files: parameters.update({'file': source})", "we remove all previous collection # data first. if ctu_config.collect:", "To run the static analyzer against a build is done", "as new output with report_directory( args.output, args.keep_empty, args.output_format) as args.output:", "{1}'.format( key, function.__name__)) return function(*args, **kwargs) return wrapper return decorator", "# Execute Clang again, but run the syntax check only.", "command line and executes it. Capture the output of the", "number_of_bugs if args.status_bugs else 0 def need_analyzer(args): \"\"\" Check the", "the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt", "+ '.info.txt', 'w') as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', '", "= os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except", "also after the run. If the user asks only for", "of individual external definition maps and creates a global map", "is not possible the given method just return and break", "the captured commands. 
if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build", "except ClangErrorException: pass # write general information about the crash", "[opts['file'], '-o', target()], cwd) output = run_command(cmd, cwd=cwd) return {'error_output':", "(mangled name, ast file) pairs into final file. \"\"\" extern_defs_map_file", "child terminated # by signal.) Everything else is 'Other Error'.", "option name, value number of options to skip IGNORED_FLAGS =", "unique names. We leave conflicting names out of CTU. :param", "for source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters)", "`analyze-c++` compiler wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\"", "triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: #", "args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if", "classify_source, \\ compiler_language from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \\", "against a single entry of the compilation database. This complex", "= generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all", "in normal mode. 
\"\"\" ctu_config = get_ctu_config_from_args(args) # If we", "+ '_', dir=destination()) os.close(handle) # Execute Clang again, but run", "analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines):", "from compilation database 'directory', # entry from compilation database 'file',", "' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))", "that the needed parameters received. (This is done by the", "disabled architectures and -arch switches filtered_list = [a for a", "crash reports or not 'ctu']) # ctu control options def", "+ opts['flags'] + [opts['file'], '-o', target()], cwd) output = run_command(cmd,", "value number of options to skip IGNORED_FLAGS = { '-c':", "\"\"\" ctu_config = json.loads(ctu_conf_json) # Recover namedtuple from json when", "filtered compiler flags 'arch_list': [], # list of architecture flags", "'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\" Entry point", "of input files in a determined order. \"\"\" files =", "# by signal.) Everything else is 'Other Error'. error =", "files in a determined order. \"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*'))", "or not 'ctu']) # ctu control options def run(opts): \"\"\"", "requested. return number_of_bugs if args.status_bugs else exit_code @command_entry_point def analyze_build():", "and are not # the same, those should not change", "# To have good results from static analyzer certain compiler", "`scan-build` against the configure step might be necessary, when compiler", "will set up own output file # flags below are", "sequentially pool = multiprocessing.Pool(1 if args.verbose > 2 else None)", "Governs multiple runs in CTU mode or runs once in", "implementation.) 
(handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_',", "of CTU. :param extdef_map_lines: Contains the id of a definition", "contain the mangled names and the source (AST generated from", "4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers =", "name elif opts['output_format'] == 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif',", "+ compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer against", "failures directory if not exits yet. \"\"\" failures_dir = os.path.join(opts['output_dir'],", "try: cwd = opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args']", "following the # Perl implementation.) (handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_'", "data is # left so multiple analyze runs can use", "if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if ctu_config.collect:", "be merged at the end into a global map file:", "out of absolute path = path[1:] if path[0] == os.sep", "opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \\", "directory if not exits yet. \"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures')", "arguments of the analyzer. This method generates those. \"\"\" result", "build command. 
there are cases, when analyzer run # is", "If failure reports are requested, it calls the continuation to", "ctu_config = get_ctu_config_from_args(args) # If we do a CTU collect", "= [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if", "is from the original sequence and the odd elements are", "= get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true']", "'), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags,", "# write general information about the crash with open(name +", "create another sequence where every second element is from the", "str. :returns: Mangled name - AST file pairs. :rtype: List", "global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all", "options def run(opts): \"\"\" Entry point to run (or not)", "needed by CTU analysis. \"\"\" def generate_ast(triple_arch): \"\"\" Generates ASTs", "using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate external", "os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir", "'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess source by generating all data", "if filtered_list: # There should be only one arch given", "not required. But we need to set up everything for", "output of the analysis and returns with it. If failure", "to capture the CC/CXX values # for the Makefile. 
if", "CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single", "the analysis is not possible the given method just return", "prefix. eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0,", "= os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)", "\"\"\" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. \"\"\"", "= run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,", "run. If the user asks only for a single phase", "analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation", "path ast_path = os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name + \"", "command. 
\"\"\" args = opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir,", "# arguments from command line 'force_debug', # kill non debug", "'c++-cpp-output', 'objective-c-cpp-output' }) # language can be given as a", "logging.debug('skip analysis, language not known') return None elif language not", "output file # flags below are inherited from the perl", "None: for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep,", "handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close()", "in multiple steps: -- Intercept: capture the compilation command during", "handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format'])", "is None and compiler is not None: language = classify_source(opts['file'],", "analysis is not possible the given method just return and", "requested, it calls the continuation to generate it. \"\"\" def", "Iterator of str. 
:returns: Mangled name - AST file pairs.", "call static analyzer against the compilation for source in compilation.files:", "done in multiple steps: -- Intercept: capture the compilation command", "as handle: generator = (dict(cmd, **consts) for cmd in json.load(handle)", "filtered_list: # There should be only one arch given (or", "database 'clang', # clang executable name (and path) 'direct_args', #", "None: return # collect the needed parameters from environment, crash", "args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),", "= opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args)", "args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) }", "import functools import subprocess import contextlib import datetime import shutil", "re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): \"\"\" From a sequence create", "'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return", "mangled_name, path = extdef_src_txt.split(\" \", 1) # Normalize path on", "build process. \"\"\" return len(args) and not re.search(r'configure|autogen', args[0]) def", "for the current source. \"\"\" args = opts['direct_args'] + opts['flags']", "compiler output also captured into '.stderr.txt' file. And some more", "compiler flag filtering only affects the static analyzer run. 
#", "received_list = opts.pop('arch_list') if received_list: # filter out disabled architectures", "= run_build(args.build, env=environment) # Cover report generation and bug counting.", "This complex task is decomposed into smaller methods which are", "reports.\" else: msg = \"View result at %s/results-merged.sarif.\" keep =", "and add others) and take out language (-x) and architecture", "the continuation to generate it. \"\"\" def target(): \"\"\" Creates", "prefix the filename. \"\"\" if not os.path.isabs(filename): # filename is", "analyze-build command. \"\"\" args = parse_args_for_analyze_build() # will re-assign the", "glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch =", "not os.getenv('ANALYZE_BUILD_CLANG'): return # check is it a compilation? compilation", "+ opts['flags'] + \\ [opts['file'], '-o', name] try: cmd =", "and stop when any of those is missing. \"\"\" def", "contains no report, but kept.\" else: msg = \"Removing directory", "if path[0] == os.sep else path ast_path = os.path.join(\"ast\", path", "into a global one. As the collect phase runs parallel", "if args.intercept_first: # Run build command with intercept module. exit_code", "If we do a CTU collect (1st phase) we remove", "[elem for piece in pieces for elem in [constant, piece]]", "flags below are inherited from the perl implementation. '-g': 0,", "... or find out from source file extension if language", "need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if", "if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory'] cmd", "to keep or delete the empty report directory. \"\"\" stamp_format", "collection data before and # also after the run. If", "flag filtering only affects the static analyzer run. 
# #", "ClangErrorException as ex: result = {'error_output': ex.error, 'exit_code': 0} if", "CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file: for mangled_name, ast_file in", "compilation command during the build, -- Analyze: run the analyzer", "filtered_list.pop() logging.debug('analysis, on arch: %s', current) opts.update({'flags': ['-arch', current] +", "name) if not keep: os.rmdir(name) def analyzer_params(args): \"\"\" A group", "compilation database 'file', # entry from compilation database 'clang', #", "entry from compilation database 'clang', # clang executable name (and", "chain. The passed parameter is a python dictionary. Each method", "= opts.pop('command') command = command if isinstance(command, list) else decode(command)", "\"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases')", "When static analyzer run against project configure step, it should", "paths. filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory, filename)", "pass # ignore some flags elif arg in IGNORED_FLAGS: count", "> 2 else None) for current in pool.imap_unordered(run, generator): if", "where every second element is from the original sequence and", "runs in CTU mode or runs once in normal mode.", "\"\"\" CTU configuration is created from the chosen phases and", "the analyzer against the captured commands, -- Report: create a", "from libscanbuild.intercept import capture from libscanbuild.report import document from libscanbuild.compilation", "args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop:", "import json import logging import multiprocessing import tempfile import functools", "the # Perl implementation.) 
(handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' +", "against a compilation db. govern_analyzer_runs(args) # Cover report generation and", "\"Removing directory '%s' because it contains no report.\" logging.warning(msg, name)", "create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of individual external definition maps and", "# kill non debug macros 'output_dir', # where generated report", "result = { 'flags': [], # the filtered compiler flags", "as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep)", "List of (str, str) tuples. \"\"\" mangled_to_asts = defaultdict(set) for", "in files: with open(filename, 'r') as in_file: for line in", "environment @command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry point for `analyze-cc` and", "extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs =", "def write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled name, ast file) pairs", "analyzer failed. The major report is the preprocessor output. The", "from environment, crash when missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'),", "return continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\"", "the required attributes in the passed state and stop when", "here. run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set up", "generated from the source) which had their definition. 
These files", "json when coming from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1],", "'ppc64'}) received_list = opts.pop('arch_list') if received_list: # filter out disabled", "'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path)", "looks source file are not flags elif re.match(r'^[^-].+', arg) and", "options shall be # omitted. The compiler flag filtering only", "checkers]) return prefix_with('-Xclang', result) def require(required): \"\"\" Decorator for checking", "captured commands. if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build command", "if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes'", "line def write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled name, ast file)", "the compilation database. This complex task is decomposed into smaller", "args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation database') with", "ex.error, 'exit_code': 0} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result", "{'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i') def", "opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] +", "against project configure step, it should be silent and no", "\"\"\" disabled = frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if received_list:", "args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4:", "'', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' 
if args.force_debug else '',", "excluded directory prefix the filename. \"\"\" if not os.path.isabs(filename): #", "path + \".ast\") extdef_ast_list.append(mangled_name + \" \" + ast_path) return", "\"\"\" return [elem for piece in pieces for elem in", "configuration is created from the chosen phases and dir. \"\"\"", "(dict(cmd, **consts) for cmd in json.load(handle) if not exclude( cmd['file'],", "result or not os.getenv('ANALYZE_BUILD_CLANG'): return # check is it a", "so we can leave it empty args.ctu_phases = CtuConfig(collect=True, analyze=False,", "(2nd) phase, we do an # all-in-one run where we", "parameter is a python dictionary. Each method first check that", "libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept import capture from", "for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name, ast_file))", "# omitted. The compiler flag filtering only affects the static", "\"\"\" Create report when analyzer failed. The major report is", "'force_debug', # kill non debug macros 'output_dir', # where generated", "return environment @command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry point for `analyze-cc`", "'clang', # clang executable name (and path) 'direct_args', # arguments", "extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args)", "handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep) handle.write(' '.join(cmd)", "in extdef_map_lines: mangled_name, ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs", "parameters received. (This is done by the 'require' decorator. 
It's", "cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \\ [opts['file'],", "ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config',", "strings are coming from args.ctu_dir and extdef_map_cmd, # so we", "and analyzer with compiler wrappers. environment = setup_environment(args) exit_code =", "number_of_bugs if args.status_bugs else exit_code @command_entry_point def analyze_build(): \"\"\" Entry", "name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif opts['output_format']", "extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is created from the", "captured output too with open(name + '.stderr.txt', 'w') as handle:", "SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\" This module implements the 'scan-build'", "and # also after the run. If the user asks", "scan_build(): \"\"\" Entry point for scan-build command. \"\"\" args =", "'language', 'error_output', 'exit_code']) def report_failure(opts): \"\"\" Create report when analyzer", "# is not required. But we need to set up", "source file extension if language is None and compiler is", "def report_directory(hint, keep, output_format): \"\"\" Responsible for the report directory.", "signal.) Everything else is 'Other Error'. error = 'crash' if", "'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\" Find out the", "others) and take out language (-x) and architecture (-arch) flags", "document(args) # Set exit status as it was requested. return", "against the compilation for source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer", "is done in multiple steps: -- Intercept: capture the compilation", "analyzer_params(args): \"\"\" A group of command line arguments can mapped", "compiler wrappers are used. 
That's the moment when build setup", "missing. \"\"\" def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for key", "supported arch') return None else: logging.debug('analysis, on default arch') return", "'c') if language is None: logging.debug('skip analysis, language not known')", "cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] + [opts['file'],", "on arch: %s', current) opts.update({'flags': ['-arch', current] + opts['flags']}) return", "in mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs", "import command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments", "is created from the chosen phases and dir. \"\"\" ctu_config", "if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set up environment for", "line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): \"\"\" Governs", "<filename>clang/tools/scan-build-py/libscanbuild/analyze.py<gh_stars>1-10 # -*- coding: utf-8 -*- # Part of the", "os.getenv('ANALYZE_BUILD_CLANG'): return # check is it a compilation? compilation =", "wrapper return decorator @require(['command', # entry from compilation database 'directory',", "output also captured into '.stderr.txt' file. And some more execution", "values # for the Makefile. if args.intercept_first: # Run build", "parameter... language = opts.pop('language') compiler = opts.pop('compiler') # ... or", "'flags': [], # the filtered compiler flags 'arch_list': [], #", "analyzer against a compilation db. 
govern_analyzer_runs(args) # Cover report generation", "ctu_config.collect and ctu_config.analyze: # CTU strings are coming from args.ctu_dir", "= frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if received_list: # filter", "an external definition map list with ast files. \"\"\" extdef_ast_list", "opts.pop('compiler') # ... or find out from source file extension", "when build setup check the compiler and capture the location", "# when verbose output requested execute sequentially pool = multiprocessing.Pool(1", "\"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX,", "write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def", "names out of CTU. :param extdef_map_lines: Contains the id of", "source files into an external definition map list with ast", "data needed by CTU analysis. \"\"\" def generate_ast(triple_arch): \"\"\" Generates", "result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def require(required): \"\"\" Decorator for", "order. \"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in", "filename generated randomly. The compiler output also captured into '.stderr.txt'", "ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try:", "extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not", "# flags below are inherited from the perl implementation. '-g':", "before run the analyzer. 
current = filtered_list.pop() logging.debug('analysis, on arch:", "name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s', name) try:", "continuation=report_failure): \"\"\" It assembles the analysis command line and executes", "filename in files: with open(filename, 'r') as in_file: for line", "< 0 else 'other_error' # Create preprocessor output file name.", "'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): \"\"\" Preprocess source by generating", "end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file',", "basket if arg == '-arch': result['arch_list'].append(next(args)) # take language elif", "if output_format != 'sarif': # 'scan-view' currently does not support", "is decomposed into smaller methods which are calling each other", "maps into a global one. As the collect phase runs", "Turns textual external definition map list with source files into", "os.path import json import logging import multiprocessing import tempfile import", "continuation(opts) # To have good results from static analyzer certain", "args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config])", "= opts.pop('language') compiler = opts.pop('compiler') # ... 
or find out", "os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep) handle.write(' '.join(cmd) + os.linesep)", "not flags elif re.match(r'^[^-].+', arg) and classify_source(arg): pass # ignore", "= tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif opts['output_format'] ==", "\"\"\" result = { 'flags': [], # the filtered compiler", "== '-arch': result['arch_list'].append(next(args)) # take language elif arg == '-x':", "@require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): \"\"\" Do run analyzer through", "multiple analyze runs can use the same data gathered by", "their definition. These files should be merged at the end", "def wrapper(*args, **kwargs): for key in required: if key not", "source by generating all data needed by CTU analysis. \"\"\"", "the analyzer against the given compilation database. \"\"\" def exclude(filename,", "undefine macro at the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return", "The major report is the preprocessor output. The output filename", "current is not None: # display error message from the", "phases and dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd)", "-- Analyze: run the analyzer against the captured commands, --", "to directory. Need to turn # it to absolute since", "be overwritten '-o': 1, # will set up own output", "'-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\",", "with report_directory( args.output, args.keep_empty, args.output_format) as args.output: # Run against", "(and path) 'direct_args', # arguments from command line 'force_debug', #", "language = opts.pop('language') compiler = opts.pop('compiler') # ... 
or find", "not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: # In case an", "wrappers, because 'configure' needs to capture the CC/CXX values #", "keep -- a boolean value to keep or delete the", "from libscanbuild.shell import decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC", "'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args)", "= glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in files: with open(filename,", "parallel on multiple threads, all compilation units are separately mapped", "it to absolute since 'args.excludes' are absolute paths. filename =", "the empty report directory. \"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp =", "file) name. :type extdef_map_lines: Iterator of str. :returns: Mangled name", "@functools.wraps(function) def wrapper(*args, **kwargs): for key in required: if key", "extdef_src_txt.split(\" \", 1) # Normalize path on windows as well", "dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only one phase of 2 phases", "separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition", "Write (mangled name, ast file) pairs into final file. \"\"\"", "when missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format':", "We leave conflicting names out of CTU. :param extdef_map_lines: Contains", "of 2 phases of CTU if needed. \"\"\" ctu_config =", "check only. cwd = opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E']", "by a single # collection run. if ctu_config.collect and ctu_config.analyze:", "an other process already created it. 
pass ast_command = [opts['clang'],", "definition maps and creates a global map keeping only unique", "'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run", "'Crash'. # (python subprocess Popen.returncode is negative when child terminated", "if not keep: os.rmdir(name) def analyzer_params(args): \"\"\" A group of", "analyze) are launched from here. run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def", "the needed parameters received. (This is done by the 'require'", "yet. \"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir)", "leave conflicting names out of CTU. :param extdef_map_lines: Contains the", "os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify error type: when Clang", "exit_code @command_entry_point def analyze_build(): \"\"\" Entry point for analyze-build command.", "= 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not", "'.mii', 'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i') def destination():", "the preprocessor output. The output filename generated randomly. The compiler", "# also after the run. If the user asks only", "logging.debug('analyzer parameters %s', parameters) current = run(parameters) # display error", "1, '-multiply_defined': 1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1 }", "'.info.txt', 'w') as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ')", ":rtype: List of (str, str) tuples. 
\"\"\" mangled_to_asts = defaultdict(set)", "build is done in multiple steps: -- Intercept: capture the", "collect (1st) and analyze (2nd) phase, we do an #", "flags 'arch_list': [], # list of architecture flags 'language': None,", "\"\"\" extdef_ast_list = [] for extdef_src_txt in extdef_src_list: mangled_name, path", "logging.warning(msg, name) if not keep: os.rmdir(name) def analyzer_params(args): \"\"\" A", "for scan-build command. \"\"\" args = parse_args_for_scan_build() # will re-assign", "wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer", "logging.debug('skip analysis, language not supported') return None else: logging.debug('analysis, language:", "from the chosen phases and dir. \"\"\" ctu_config = json.loads(ctu_conf_json)", "already created it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False)", "'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu':", "\"\"\" Entry point for analyze-build command. \"\"\" args = parse_args_for_analyze_build()", "\"\"\" args = parse_args_for_scan_build() # will re-assign the report directory", "flags elif re.match(r'^[^-].+', arg) and classify_source(arg): pass # ignore some", "or runs once in normal mode. 
\"\"\" ctu_config = get_ctu_config_from_args(args)", "else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else", "= create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary files shutil.rmtree(extdefmap_dir,", "multiple steps: -- Intercept: capture the compilation command during the", "'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment", "\"View result at %s/results-merged.sarif.\" keep = True else: if keep:", "'-x': result['language'] = next(args) # parameters which looks source file", "decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for key in required: if", "as well path = os.path.splitdrive(path)[1] # Make relative path out", "for line in extdef_map_lines: mangled_name, ast_file = line.strip().split(' ', 1)", "name. (This is blindly following the # Perl implementation.) (handle,", "# the same, those should not change the pre-processing step.", "# iterate on the compile options args = iter(command[1:]) for", "return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): \"\"\" Filter out", "when Clang terminated by a signal it's a 'Crash'. #", "a build is done in multiple steps: -- Intercept: capture", "extdef_map_lines: Iterator of str. :returns: Mangled name - AST file", "= opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] +", "ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug'])", "a build command. there are cases, when analyzer run #", "any excluded directory prefix the filename. \"\"\" if not os.path.isabs(filename):", "calls the continuation to generate it. \"\"\" def target(): \"\"\"", "extension. 
\"\"\" mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}", "run `scan-build` against the configure step might be necessary, when", "crash with open(name + '.info.txt', 'w') as handle: handle.write(opts['file'] +", "{ 'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args':", "[constant, piece]] def get_ctu_config_from_args(args): \"\"\" CTU configuration is created from", "compiler_language(command) # 'c' or 'c++' } # iterate on the", "entry of the compilation database. This complex task is decomposed", "analyze_compiler_wrapper_impl(result, execution): \"\"\" Implements analyzer compiler wrapper functionality. \"\"\" #", "up everything for the # wrappers, because 'configure' needs to", "path = extdef_src_txt.split(\" \", 1) # Normalize path on windows", "+ opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast')", "in { 'plist', 'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist',", "not passed to {1}'.format( key, function.__name__)) return function(*args, **kwargs) return", "not None: # display error message from the static analyzer", "with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. #", "= 'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\" Entry point for scan-build", "= os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir #", "Create report when analyzer failed. 
The major report is the", "[opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \\ + [opts['file']] triple_arch", "count = IGNORED_FLAGS[arg] for _ in range(count): next(args) # we", "eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3]", "import decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc'", "re import os import os.path import json import logging import", "return # collect the needed parameters from environment, crash when", "of a definition (mangled name) and the originating source (the", "\"\"\" def generate_ast(triple_arch): \"\"\" Generates ASTs for the current compilation", "IGNORED_FLAGS = { '-c': 0, # compile option will be", "= os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)", "original sequence and the odd elements are the prefix. eg.:", "import glob from collections import defaultdict from libscanbuild import command_entry_point,", "\"\"\" def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for key in", "ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches: if", "'output_failures', # generate crash reports or not 'ctu']) # ctu", "extension(): \"\"\" Generate preprocessor file extension. \"\"\" mapping = {'objective-c++':", "map file for the current source. \"\"\" args = opts['direct_args']", "line arguments of the analyzer. This method generates those. 
\"\"\"", "name) and the originating source (the corresponding AST file) name.", "False): opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns textual", "analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs", "extdef_ast_list = [] for extdef_src_txt in extdef_src_list: mangled_name, path =", "the id of a definition (mangled name) and the originating", "it's a 'Crash'. # (python subprocess Popen.returncode is negative when", "The compiler flag filtering only affects the static analyzer run.", "extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try:", "def analyze_build(): \"\"\" Entry point for analyze-build command. \"\"\" args", "def require(required): \"\"\" Decorator for checking the required values in", "else path ast_path = os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name +", "line parameters or file name extension. The decision also influenced", "'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'", "if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats')", "requested. return number_of_bugs if args.status_bugs else 0 def need_analyzer(args): \"\"\"", "the same, those should not change the pre-processing step. #", "no report, but kept.\" else: msg = \"Removing directory '%s'", "(handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif", "phase) we remove all previous collection # data first. 
if", "logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format): \"\"\" Responsible for the", "is 'Other Error'. error = 'crash' if opts['exit_code'] < 0", "v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information.", "else 0 def need_analyzer(args): \"\"\" Check the intent of the", "if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers", "line 'force_debug', # kill non debug macros 'output_dir', # where", "'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0],", "continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): \"\"\" Do run analyzer", "if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress') if", "options args = iter(command[1:]) for arg in args: # take", "Contains the id of a definition (mangled name) and the", "\"\"\" mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} return", "general information about the crash with open(name + '.info.txt', 'w')", "'_', dir=destination()) os.close(handle) # Execute Clang again, but run the", "opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error(\"Problem occurred during analysis.\", exc_info=1)", "import document from libscanbuild.compilation import split_command, classify_source, \\ compiler_language from", "result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return", "# Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\"", "but run the syntax check 
only. cwd = opts['directory'] cmd", "analysis, language not supported') return None else: logging.debug('analysis, language: %s',", "args.intercept_first: # Run build command with intercept module. exit_code =", "\"\"\" if opts.pop('force_debug'): # lazy implementation just append an undefine", "extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder)", "same data gathered by a single # collection run. if", "'output_dir', # where generated report files shall go 'output_format', #", "if args.status_bugs else 0 def need_analyzer(args): \"\"\" Check the intent", "into final file. \"\"\" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with", "mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))", "CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of", "{'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures', False): opts.update(result) continuation(opts) return", "one. As the collect phase runs parallel on multiple threads,", "return name return opts['output_dir'] try: cwd = opts['directory'] cmd =", "the analyzer. current = filtered_list.pop() logging.debug('analysis, on arch: %s', current)", "analyze (2nd) phase, we do an # all-in-one run where", "only one phase of 2 phases of CTU if needed.", "is not None: language = classify_source(opts['file'], compiler == 'c') if", "scan-build command. \"\"\" args = parse_args_for_scan_build() # will re-assign the", "0, # static analyzer option will be overwritten '-o': 1,", "a CTU collect (1st phase) we remove all previous collection", "= next(args) # parameters which looks source file are not", "Responsible for the report directory. 
hint -- could specify the", "return failures_dir # Classify error type: when Clang terminated by", "directory. hint -- could specify the parent directory of the", "command if isinstance(command, list) else decode(command) logging.debug(\"Run analyzer against '%s'\",", "To have good results from static analyzer certain compiler options", "def prefix_with(constant, pieces): \"\"\" From a sequence create another sequence", "opts['flags'] \\ + [opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch)", "LLVM-exception \"\"\" This module implements the 'scan-build' command API. To", "from the original sequence and the odd elements are the", "attributes in the passed state and stop when any of", "specified 'compiler': compiler_language(command) # 'c' or 'c++' } # iterate", "1) # Normalize path on windows as well path =", "compilation database. \"\"\" def exclude(filename, directory): \"\"\" Return true when", "get_triple_arch, \\ ClangErrorException from libscanbuild.shell import decode __all__ = ['scan_build',", "import split_command, classify_source, \\ compiler_language from libscanbuild.clang import get_version, get_arguments,", "open(filename, 'r') as in_file: for line in in_file: yield line", "CTU collect (1st phase) we remove all previous collection #", "cmd['file'], cmd['directory'])) # when verbose output requested execute sequentially pool", "key not in args[0]: raise KeyError('{0} not passed to {1}'.format(", "reports or not 'ctu']) # ctu control options def run(opts):", "tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_', dir=destination()) os.close(handle) # Execute", "analyze runs can use the same data gathered by a", "None) for current in pool.imap_unordered(run, generator): if current is not", "{ 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': 
os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args':", "boolean value to keep or delete the empty report directory.", "LLVM Project, under the Apache License v2.0 with LLVM Exceptions.", "tempfile import functools import subprocess import contextlib import datetime import", "to absolute since 'args.excludes' are absolute paths. filename = os.path.normpath(os.path.join(directory,", "return decorator @require(['command', # entry from compilation database 'directory', #", "dir. \"\"\" ctu_config = json.loads(ctu_conf_json) # Recover namedtuple from json", "opts.pop('force_debug'): # lazy implementation just append an undefine macro at", "method just return and break the chain. The passed parameter", "args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else:", "args.verbose > 2 else None) for current in pool.imap_unordered(run, generator):", "opts.update(result) continuation(opts) return result except ClangErrorException as ex: result =", "result = {'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures', False): opts.update(result)", "= 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\" Entry", "of options to skip IGNORED_FLAGS = { '-c': 0, #", "# display error message from the static analyzer for line", "else: # Single runs (collect or analyze) are launched from", "opts.pop('arch_list') if received_list: # filter out disabled architectures and -arch", "source (the corresponding AST file) name. :type extdef_map_lines: Iterator of", "chosen phases and dir. \"\"\" return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir,", "'%s' contains no report, but kept.\" else: msg = \"Removing", "the Makefile. 
if args.intercept_first: # Run build command with intercept", "as in_file: for line in in_file: yield line def write_global_map(arch,", "debug macros 'output_dir', # where generated report files shall go", "module. exit_code = capture(args) # Run the analyzer against the", "shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs the analyzer against the", "language: %s', language) opts.update({'language': language, 'flags': ['-x', language] + opts['flags']})", "license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\" This module", "return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and", "1, '-init': 1, '-e': 1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined':", "json.loads(ctu_conf_json) # Recover namedtuple from json when coming from analyze-cc", "= extdef_src_txt.split(\" \", 1) # Normalize path on windows as", "parameters from environment, crash when missing parameters = { 'clang':", "in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format): \"\"\" Responsible", "of the build command. When static analyzer run against project", "in json.load(handle) if not exclude( cmd['file'], cmd['directory'])) # when verbose", "AST using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate", "all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): \"\"\" Runs the", "out_file: for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name,", "'c++' } # iterate on the compile options args =", "that we don't want to see. 
elif re.match(r'^-W.+', arg) and", "'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify error", "'-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer", "libscanbuild.report import document from libscanbuild.compilation import split_command, classify_source, \\ compiler_language", "lazy implementation just append an undefine macro at the end", "'-multiply_defined': 1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1 } def", "logging import multiprocessing import tempfile import functools import subprocess import", "all lines of input files in a determined order. \"\"\"", "\"\"\" args = opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file'])", "filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory, filename) for", "['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags):", "keeping only unique names. We leave conflicting names out of", "Clang again, but run the syntax check only. cwd =", "execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } #", "try: command = opts.pop('command') command = command if isinstance(command, list)", "out of CTU. :param extdef_map_lines: Contains the id of a", "' ') + os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname())", "Keys are the option name, value number of options to", "does not support sarif format. 
msg = \"Run 'scan-view %s'", "logging.debug('analysis, on default arch') return continuation(opts) # To have good", "return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts):", "about extra warnings, but we should suppress ones # that", "+ os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep) handle.write(' '.join(cmd) +", "the analyzer outputs. \"\"\" import re import os import os.path", "result['language'] = next(args) # parameters which looks source file are", "multiple threads, all compilation units are separately mapped into a", "for filename in files: with open(filename, 'r') as in_file: for", "**kwargs): for key in required: if key not in args[0]:", "be overwritten '-fsyntax-only': 0, # static analyzer option will be", "terminated # by signal.) Everything else is 'Other Error'. error", "units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.", "str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose", "or relative to directory. Need to turn # it to", "extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder =", "# 'scan-view' currently does not support sarif format. msg =", "-arch switches filtered_list = [a for a in received_list if", "message from the static analyzer for line in current['error_output']: logging.info(line.rstrip())", "is blindly following the # Perl implementation.) 
(handle, name) =", "return {'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError as ex: result", "json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def analyze_compiler_wrapper(): \"\"\" Entry point", "+ opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options =", "with compiler wrappers. environment = setup_environment(args) exit_code = run_build(args.build, env=environment)", "failed. The major report is the preprocessor output. The output", "if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir):", "else: msg = \"Removing directory '%s' because it contains no", "} logging.debug('run analyzer against compilation database') with open(args.cdb, 'r') as", "in the passed state and stop when any of those", "out disabled architectures and -arch switches filtered_list = [a for", "bug reports.\" else: msg = \"View result at %s/results-merged.sarif.\" keep", "# entry from compilation database 'file', # entry from compilation", "= frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' })", "@require(['language', 'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\" Find out", "compiler wrapper. \"\"\" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC,", "true when any excluded directory prefix the filename. \"\"\" if", "run(opts): \"\"\" Entry point to run (or not) static analyzer", "args = parse_args_for_analyze_build() # will re-assign the report directory as", "'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) }", "of the analysis and returns with it. 
If failure reports", "return result except ClangErrorException as ex: result = {'error_output': ex.error,", "current is not None: for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager", "# 'c' or 'c++' } # iterate on the compile", "command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments import", "'r') as handle: generator = (dict(cmd, **consts) for cmd in", "destination(): \"\"\" Creates failures directory if not exits yet. \"\"\"", "'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir):", "os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report", "get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options", "sequence where every second element is from the original sequence", "implementation. '-g': 0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version':", "'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args),", "any of those is missing. \"\"\" def decorator(function): @functools.wraps(function) def", "\"\"\" Generates ASTs for the current compilation command. \"\"\" args", "and break the chain. 
The passed parameter is a python", "or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\"", "import logging import multiprocessing import tempfile import functools import subprocess", "[] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats:", "opts.get('output_failures', False): opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): \"\"\" Turns", "args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases =", "= prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts,", "= opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags']", "datetime import shutil import glob from collections import defaultdict from", "database. This complex task is decomposed into smaller methods which", "setup_environment(args) exit_code = run_build(args.build, env=environment) # Cover report generation and", "It assembles the analysis command line and executes it. Capture", "import re import os import os.path import json import logging", "= get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] + [opts['file'], '-o',", "'-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1, '-init': 1,", "ones # that we don't want to see. 
elif re.match(r'^-W.+',", "arch are given and are not # the same, those", "write general information about the crash with open(name + '.info.txt',", "command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error(\"Problem occurred during analysis.\",", "# ... or find out from source file extension if", "3, '--param': 1, '--serialize-diagnostics': 1 } def classify_parameters(command): \"\"\" Prepare", "list with ast files. \"\"\" extdef_ast_list = [] for extdef_src_txt", "'-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1,", "analyzer through one of the given architectures. \"\"\" disabled =", "functools import subprocess import contextlib import datetime import shutil import", "in pieces for elem in [constant, piece]] def get_ctu_config_from_args(args): \"\"\"", "name] try: cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError:", "= os.path.join(\"ast\", path + \".ast\") extdef_ast_list.append(mangled_name + \" \" +", "terminated by a signal it's a 'Crash'. # (python subprocess", "from the static analyzer if current is not None: for", "write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled name, ast file) pairs into", "clang executable name (and path) 'direct_args', # arguments from command", "range(count): next(args) # we don't care about extra warnings, but", "implements the 'scan-build' command API. To run the static analyzer", "pairs into final file. \"\"\" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME)", "dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd = opts['directory']", "generation and bug counting. 
number_of_bugs = document(args) # Set exit", "be only one arch given (or the same multiple #", "run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): \"\"\" Set up environment", "set up everything for the # wrappers, because 'configure' needs", "with open(name + '.info.txt', 'w') as handle: handle.write(opts['file'] + os.linesep)", "left so multiple analyze runs can use the same data", "names and the source (AST generated from the source) which", "called method.) \"\"\" try: command = opts.pop('command') command = command", "return opts['output_dir'] try: cwd = opts['directory'] cmd = get_arguments([opts['clang'], '--analyze']", "None: language = classify_source(opts['file'], compiler == 'c') if language is", "# If we do a CTU collect (1st phase) we", "display error message from the static analyzer for line in", "See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception", "odd elements are the prefix. eg.: prefix_with(0, [1,2,3]) creates [0,", "and classify_source(arg): pass # ignore some flags elif arg in", "# (python subprocess Popen.returncode is negative when child terminated #", "else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration", "language can be given as a parameter... language = opts.pop('language')", "decorator. It's like an 'assert' to check the contract between", "point to run (or not) static analyzer against a single", "# But that's the only pass we have before run", "{ '-c': 0, # compile option will be overwritten '-fsyntax-only':", "\"\"\" Filter out nondebug macros when requested. \"\"\" if opts.pop('force_debug'):", "Perl implementation.) (handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error +", "be necessary, when compiler wrappers are used. 
That's the moment", "import os import os.path import json import logging import multiprocessing", "executable name (and path) 'direct_args', # arguments from command line", "files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in files: with", "Cover report generation and bug counting. number_of_bugs = document(args) #", "checking the required values in state. It checks the required", "next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external definition", "multiple runs in CTU mode or runs once in normal", "[], # list of architecture flags 'language': None, # compilation", "analyzer against the given compilation database. \"\"\" def exclude(filename, directory):", "if opts['exit_code'] < 0 else 'other_error' # Create preprocessor output", "'crash' if opts['exit_code'] < 0 else 'other_error' # Create preprocessor", "capture the compilation command during the build, -- Analyze: run", "compiler wrapper functionality. \"\"\" # don't run analyzer when compilation", "mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary files", "'-E'] + opts['flags'] + \\ [opts['file'], '-o', name] try: cmd", "result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if", "the same multiple # times). If there are multiple arch", "run_analyzer(opts, continuation=report_failure): \"\"\" It assembles the analysis command line and", "the odd elements are the prefix. eg.: prefix_with(0, [1,2,3]) creates", "+ os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write", "continuation=filter_debug_flags): \"\"\" Find out the language from command line parameters", "creates a global map keeping only unique names. We leave", "the pre-processing step. 
# But that's the only pass we", "arg in args: # take arch flags into a separate", "only for a single phase data is # left so", "os.close(handle) return name elif opts['output_format'] == 'sarif': (handle, name) =", "defaultdict(set) for line in extdef_map_lines: mangled_name, ast_file = line.strip().split(' ',", "also captured into '.stderr.txt' file. And some more execution context", "language, 'flags': ['-x', language] + opts['flags']}) return continuation(opts) @require(['arch_list', 'flags'])", "capture from libscanbuild.report import document from libscanbuild.compilation import split_command, classify_source,", "from compilation database 'clang', # clang executable name (and path)", "saved into '.info.txt' file. \"\"\" def extension(): \"\"\" Generate preprocessor", "chosen phases and dir. \"\"\" ctu_config = json.loads(ctu_conf_json) # Recover", "only pass we have before run the analyzer. current =", "process already created it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder,", "\"\"\" Generate external definition map file for the current source.", "if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config',", "arg == '-x': result['language'] = next(args) # parameters which looks", "cmd in json.load(handle) if not exclude( cmd['file'], cmd['directory'])) # when", "# generate crash reports or not 'ctu']) # ctu control", "'-fsyntax-only': 0, # static analyzer option will be overwritten '-o':", "mangled_name, ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = []", "= datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name", "generate crash reports or not 'ctu']) # ctu control options", "'%s'\", ast_command) 
run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): \"\"\" Generate external definition", "\"\"\" Find out the language from command line parameters or", "first check that the needed parameters received. (This is done", "execution): \"\"\" Implements analyzer compiler wrapper functionality. \"\"\" # don't", "command and analyzer with compiler wrappers. environment = setup_environment(args) exit_code", "name finally: if os.listdir(name): if output_format != 'sarif': # 'scan-view'", "filename. \"\"\" if not os.path.isabs(filename): # filename is either absolute", "out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd = opts['directory'] cmd = [opts['clang'],", "\"\"\" def target(): \"\"\" Creates output file name for reports.", "-- could specify the parent directory of the output directory.", "get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer against the compilation for", "args.output, args.keep_empty, args.output_format) as args.output: # Run against a build", "file name extension. The decision also influenced by the compiler", "and capture the location for the build process. \"\"\" return", "result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats:", "for elem in [constant, piece]] def get_ctu_config_from_args(args): \"\"\" CTU configuration", "to {1}'.format( key, function.__name__)) return function(*args, **kwargs) return wrapper return", "= True else: if keep: msg = \"Report directory '%s'", "CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build(): \"\"\"", "= os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: #", "arch given (or the same multiple # times). If there", "# collection run. 
if ctu_config.collect and ctu_config.analyze: # CTU strings", "final file. \"\"\" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file,", "'*')) for triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path)", "phase, we do an # all-in-one run where we deliberately", "A group of command line arguments can mapped to command", "when any of those is missing. \"\"\" def decorator(function): @functools.wraps(function)", "map using '%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list =", "target()], cwd) output = run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code':", "phases of CTU if needed. \"\"\" ctu_config = opts['ctu'] if", "1, '-compatibility_version': 1, '-init': 1, '-e': 1, '-seg1addr': 1, '-bundle_loader':", "exits yet. \"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir):", "'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG':", "created it. pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file'])", "path = os.path.splitdrive(path)[1] # Make relative path out of absolute", "maps contain the mangled names and the source (AST generated", "else decode(command) logging.debug(\"Run analyzer against '%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts)", "@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure):", "perl implementation. '-g': 0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1,", "control options def run(opts): \"\"\" Entry point to run (or", "determined order. 
\"\"\" files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename", "we don't care about extra warnings, but we should suppress", "'-init': 1, '-e': 1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1,", "methods which are calling each other in chain. If the", "`analyze-cc` and `analyze-c++` compiler wrappers. \"\"\" return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result,", "from libscanbuild.arguments import parse_args_for_scan_build, \\ parse_args_for_analyze_build from libscanbuild.intercept import capture", "silent and no need to run the analyzer or generate", "'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def", "in in_file: yield line def write_global_map(arch, mangled_ast_pairs): \"\"\" Write (mangled", "reports. \"\"\" if opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}: (handle,", "in accepted: logging.debug('skip analysis, language not supported') return None else:", "def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external definition maps into a", "report generation and bug counting. number_of_bugs = document(args) # Set", "= json.loads(ctu_conf_json) # Recover namedtuple from json when coming from", "'-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1, '-init': 1, '-e': 1,", "the language from command line parameters or file name extension.", "dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of individual external", "compile option will be overwritten '-fsyntax-only': 0, # static analyzer", "compilation fails. or when it's not requested. if result or", "(mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches:", "be given as a parameter... 
language = opts.pop('language') compiler =", "given and are not # the same, those should not", "analyzer against the captured commands. if need_analyzer(args.build): govern_analyzer_runs(args) else: #", "analysis, language not known') return None elif language not in", "# Run the analyzer against a compilation db. govern_analyzer_runs(args) #", "split_command(execution.cmd) if compilation is None: return # collect the needed", "for extdef_src_txt in extdef_src_list: mangled_name, path = extdef_src_txt.split(\" \", 1)", "analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else", "default arch') return continuation(opts) # To have good results from", "\\ compiler_language from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \\ ClangErrorException", "not specified 'compiler': compiler_language(command) # 'c' or 'c++' } #", "# Part of the LLVM Project, under the Apache License", "from command line parameters or file name extension. The decision", "Normalize path on windows as well path = os.path.splitdrive(path)[1] #", "on windows as well path = os.path.splitdrive(path)[1] # Make relative", "map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all lines", "for checking the required values in state. 
It checks the", "isinstance(command, list) else decode(command) logging.debug(\"Run analyzer against '%s'\", command) opts.update(classify_parameters(command))", "os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the captured output too with", "(1st phase) we remove all previous collection # data first.", "the compilation command during the build, -- Analyze: run the", "compilation database 'clang', # clang executable name (and path) 'direct_args',", "= parse_args_for_analyze_build() # will re-assign the report directory as new", "output_format != 'sarif': # 'scan-view' currently does not support sarif", "[execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static", "handle.write(error.title().replace('_', ' ') + os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write('", "%s', name) try: yield name finally: if os.listdir(name): if output_format", "try: cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass", "not requested. if result or not os.getenv('ANALYZE_BUILD_CLANG'): return # check", "those. \"\"\" result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model:", "on the compile options args = iter(command[1:]) for arg in", "method first check that the needed parameters received. (This is", "document from libscanbuild.compilation import split_command, classify_source, \\ compiler_language from libscanbuild.clang", "to run the analyzer or generate report. To run `scan-build`", "# # Keys are the option name, value number of", "In case an other process already created it. pass ast_command", "the required values in state. 
It checks the required attributes", "ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating", "os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError:", "signal it's a 'Crash'. # (python subprocess Popen.returncode is negative", "ast_command.append('-o') ast_command.append(ast_path) logging.debug(\"Generating AST using '%s'\", ast_command) run_command(ast_command, cwd=opts['directory']) def", "'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes'", "cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \\ + opts['flags'] +", "+ opts['flags']}) return continuation(opts) else: logging.debug('skip analysis, found not supported", "build command to interpose compiler wrapper. \"\"\" environment = dict(os.environ)", "the build command. When static analyzer run against project configure", "run(parameters) # display error message from the static analyzer if", "parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters) current = run(parameters) #", "Creates failures directory if not exits yet. \"\"\" failures_dir =", "was requested. return number_of_bugs if args.status_bugs else 0 def need_analyzer(args):", "hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))", "captured into '.stderr.txt' file. And some more execution context also", "= extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder):", "case an other process already created it. 
pass if extdef_ast_list:", "ctu_config = json.loads(ctu_conf_json) # Recover namedtuple from json when coming", "namedtuple from json when coming from analyze-cc or analyze-c++ return", "# Run build command with intercept module. exit_code = capture(args)", "it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file:", "if current is not None: for line in current['error_output']: logging.info(line.rstrip())", "the moment when build setup check the compiler and capture", "output, 'exit_code': 0} except subprocess.CalledProcessError as ex: result = {'error_output':", "ctu_config.collect or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if ctu_config.collect: return", "only unique names. We leave conflicting names out of CTU.", "opts['flags']}) return continuation(opts) else: logging.debug('skip analysis, found not supported arch')", "'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language", "processing. \"\"\" result = { 'flags': [], # the filtered", "given compilation database. \"\"\" def exclude(filename, directory): \"\"\" Return true", "pass except ClangErrorException: pass # write general information about the", "take out language (-x) and architecture (-arch) flags for future", "# display error message from the static analyzer if current", "external definition maps and creates a global map keeping only", "file in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names", "env=environment) # Cover report generation and bug counting. 
number_of_bugs =", "'-e': 1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3,", "'flags']) def arch_check(opts, continuation=language_check): \"\"\" Do run analyzer through one", "_ in range(count): next(args) # we don't care about extra", "3] \"\"\" return [elem for piece in pieces for elem", "decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX", "into '.stderr.txt' file. And some more execution context also saved", "elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): pass # and", "\"\"\" Return true when any excluded directory prefix the filename.", "(handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return", "handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts,", "or not os.getenv('ANALYZE_BUILD_CLANG'): return # check is it a compilation?", "# static analyzer option will be overwritten '-o': 1, #", "file. \"\"\" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w')", "overwritten '-o': 1, # will set up own output file", "the option name, value number of options to skip IGNORED_FLAGS", "against the configure step might be necessary, when compiler wrappers", "inherited from the perl implementation. '-g': 0, '-save-temps': 0, '-install_name':", "build command with intercept module. exit_code = capture(args) # Run", "assembles the analysis command line and executes it. Capture the", "== 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return", "care about extra warnings, but we should suppress ones #", "else 'other_error' # Create preprocessor output file name. (This is", "each other in chain. 
If the analysis is not possible", "# it to absolute since 'args.excludes' are absolute paths. filename", "report directory as new output with report_directory(args.output, args.keep_empty, args.output_format) as", "\"\"\" Implements analyzer compiler wrapper functionality. \"\"\" # don't run", "received_list if a not in disabled] if filtered_list: # There", "for cmd in json.load(handle) if not exclude( cmd['file'], cmd['directory'])) #", "as it was requested. return number_of_bugs if args.status_bugs else 0", "else: msg = \"View result at %s/results-merged.sarif.\" keep = True", "a separate basket if arg == '-arch': result['arch_list'].append(next(args)) # take", "verbose output requested execute sequentially pool = multiprocessing.Pool(1 if args.verbose", "should be only one arch given (or the same multiple", "absolute or relative to directory. Need to turn # it", "preprocessor file extension. \"\"\" mapping = {'objective-c++': '.mii', 'objective-c': '.mi',", "triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options)", "'%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder", "ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError:", "break the chain. The passed parameter is a python dictionary.", "{'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError as ex: result =", "'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', # generate crash", "the captured commands, -- Report: create a cover report from", "from command line 'force_debug', # kill non debug macros 'output_dir',", "import os.path import json import logging import multiprocessing import tempfile", "check is it a compilation? 
compilation = split_command(execution.cmd) if compilation", "if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >=", "run where we deliberately remove collection data before and #", "file are not flags elif re.match(r'^[^-].+', arg) and classify_source(arg): pass", "# collect the needed parameters from environment, crash when missing", "prefix_with('-Xclang', result) def require(required): \"\"\" Decorator for checking the required", "should be silent and no need to run the analyzer", "not supported arch') return None else: logging.debug('analysis, on default arch')", "= os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory, filename) for exclude_directory", "had their definition. These files should be merged at the", "it. \"\"\" def target(): \"\"\" Creates output file name for", "commands. if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build command and", "capture(args) # Run the analyzer against the captured commands. if", "Need to turn # it to absolute since 'args.excludes' are", "new output with report_directory( args.output, args.keep_empty, args.output_format) as args.output: #", "CTU configuration is created from the chosen phases and dir.", "= filtered_list.pop() logging.debug('analysis, on arch: %s', current) opts.update({'flags': ['-arch', current]", "1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1 } def classify_parameters(command):", "libscanbuild import command_entry_point, compiler_wrapper, \\ wrapper_environment, run_build, run_command, CtuConfig from", "should suppress ones # that we don't want to see.", "those is missing. \"\"\" def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs):", "are used. 
That's the moment when build setup check the", "triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir =", "# lazy implementation just append an undefine macro at the", "chain. If the analysis is not possible the given method", "(mangled name) and the originating source (the corresponding AST file)", "filename)) return any(re.match(r'^' + exclude_directory, filename) for exclude_directory in args.excludes)", "up environment for build command to interpose compiler wrapper. \"\"\"", "os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s',", "os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug':", "get_ctu_config_from_json(ctu_conf_json): \"\"\" CTU configuration is created from the chosen phases", "generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): \"\"\" Execute only one", "if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify error type:", "'-g': 0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1,", "\"\"\" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as", "the end into a global map file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir):", "[opts['file'], '-o', name] try: cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd)", "some flags elif arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for", "mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition maps", "format. 
msg = \"Run 'scan-view %s' to examine bug reports.\"", "return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): \"\"\" Merge individual external definition maps", "line in extdef_map_lines: mangled_name, ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file)", "[], # the filtered compiler flags 'arch_list': [], # list", "= \"Run 'scan-view %s' to examine bug reports.\" else: msg", "lines of input files in a determined order. \"\"\" files", "1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3, '--param':", "definition. These files should be merged at the end into", "names. We leave conflicting names out of CTU. :param extdef_map_lines:", "name (and path) 'direct_args', # arguments from command line 'force_debug',", "continuation(opts) return result except ClangErrorException as ex: result = {'error_output':", "(filters some and add others) and take out language (-x)", "not 'ctu']) # ctu control options def run(opts): \"\"\" Entry", "number_of_bugs = document(args) # Set exit status as it was", "as a parameter... language = opts.pop('language') compiler = opts.pop('compiler') #", "one of the given architectures. \"\"\" disabled = frozenset({'ppc', 'ppc64'})", "data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user", "the 'scan-build' command API. To run the static analyzer against", "like an 'assert' to check the contract between the caller", "dir=destination()) os.close(handle) # Execute Clang again, but run the syntax", "# CTU strings are coming from args.ctu_dir and extdef_map_cmd, #", "into a temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain", "name, ast file) pairs into final file. 
\"\"\" extern_defs_map_file =", "'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result except", "return None elif language not in accepted: logging.debug('skip analysis, language", "environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else", "of str. :returns: Mangled name - AST file pairs. :rtype:", "absolute path = path[1:] if path[0] == os.sep else path", "Report: create a cover report from the analyzer outputs. \"\"\"", "creates [0, 1, 0, 2, 0, 3] \"\"\" return [elem", "not exits yet. \"\"\" failures_dir = os.path.join(opts['output_dir'], 'failures') if not", "split_command, classify_source, \\ compiler_language from libscanbuild.clang import get_version, get_arguments, get_triple_arch,", "ctu_collect_phase(opts): \"\"\" Preprocess source by generating all data needed by", "are cases, when analyzer run # is not required. But", "stop when any of those is missing. \"\"\" def decorator(function):", "opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): \"\"\" Do", "pairs. :rtype: List of (str, str) tuples. \"\"\" mangled_to_asts =", "certain compiler options shall be # omitted. The compiler flag", "directory. 
\"\"\" stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir =", "compiler flags (filters some and add others) and take out", "kill non debug macros 'output_dir', # where generated report files", "If the user asked for a collect (1st) and analyze", "shutil import glob from collections import defaultdict from libscanbuild import", "result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if", "get_arguments, get_triple_arch, \\ ClangErrorException from libscanbuild.shell import decode __all__ =", "compiler_wrapper, \\ wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build,", "current compilation command. \"\"\" args = opts['direct_args'] + opts['flags'] ast_joined_path", "logging.error(\"Problem occurred during analysis.\", exc_info=1) return None @require(['clang', 'directory', 'flags',", "= dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang", "run analyzer when compilation fails. or when it's not requested.", "file: CTU_EXTDEF_MAP_FILENAME.\"\"\" def generate_extdef_map_lines(extdefmap_dir): \"\"\" Iterate over all lines of", "source) which had their definition. These files should be merged", "[opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts,", "mangled_ast_pairs = [] for mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files)", "a global map keeping only unique names. 
We leave conflicting", "textual external definition map list with source files into an", "mangled_ast_pairs) # Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args):", "the static analyzer if current is not None: for line", "\" + ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file',", "omitted. The compiler flag filtering only affects the static analyzer", "the analysis and returns with it. If failure reports are", "not None: for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint,", "in state. It checks the required attributes in the passed", "os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s', name)", "'-o', target()], cwd) output = run_command(cmd, cwd=cwd) return {'error_output': output,", "opts.update({'flags': ['-arch', current] + opts['flags']}) return continuation(opts) else: logging.debug('skip analysis,", "continuation(opts) else: logging.debug('skip analysis, found not supported arch') return None", "args.keep_empty, args.output_format) as args.output: # Run the analyzer against a", "else: # Run build command and analyzer with compiler wrappers.", "analyzer. This method generates those. 
\"\"\" result = [] if", "dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False,", "can leave it empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='')", "opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-',", "os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang']))", "-*- coding: utf-8 -*- # Part of the LLVM Project,", "# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception \"\"\" This module implements the", "static analyzer against a single entry of the compilation database.", "os.path.isabs(filename): # filename is either absolute or relative to directory.", "source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters) current", "file extension. \"\"\" mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++':", "as args.output: # Run against a build command. there are", "\"\"\" args = opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch,", "0, 3] \"\"\" return [elem for piece in pieces for", "'-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3, '--param': 1,", "', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name, ast_files in", "runs once in normal mode. \"\"\" ctu_config = get_ctu_config_from_args(args) #", "other process already created it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w',", "require(required): \"\"\" Decorator for checking the required values in state.", "dictionary. 
Each method first check that the needed parameters received.", "the crash with open(name + '.info.txt', 'w') as handle: handle.write(opts['file']", "the filtered compiler flags 'arch_list': [], # list of architecture", "def run_analyzer(opts, continuation=report_failure): \"\"\" It assembles the analysis command line", "mangled_ast_pairs: out_file.write('%s %s\\n' % (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*'))", "Classify error type: when Clang terminated by a signal it's", "else exit_code @command_entry_point def analyze_build(): \"\"\" Entry point for analyze-build", "def generate_ast(triple_arch): \"\"\" Generates ASTs for the current compilation command.", "normal mode. \"\"\" ctu_config = get_ctu_config_from_args(args) # If we do", "and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json):", "= [] for mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files) ==", "Entry point for scan-build command. \"\"\" args = parse_args_for_scan_build() #", "not os.path.isabs(filename): # filename is either absolute or relative to", "filter out disabled architectures and -arch switches filtered_list = [a", "= split_command(execution.cmd) if compilation is None: return # collect the", "set up own output file # flags below are inherited", "analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): \"\"\" Takes iterator of individual", "= ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++'", "if language is None: logging.debug('skip analysis, language not known') return", "to skip IGNORED_FLAGS = { '-c': 0, # compile option", "required: if key not in args[0]: raise KeyError('{0} not passed", "method.) 
\"\"\" try: command = opts.pop('command') command = command if", "the needed parameters from environment, crash when missing parameters =", "done by the 'require' decorator. It's like an 'assert' to", "is not required. But we need to set up everything", "logging.debug(\"Generating external definition map using '%s'\", extdefmap_command) extdef_src_list = run_command(extdefmap_command,", "# write the captured output too with open(name + '.stderr.txt',", "counting. number_of_bugs = document(args) # Set exit status as it", "the compile options args = iter(command[1:]) for arg in args:", "compilation database. This complex task is decomposed into smaller methods", "extdef_map_cmd, # so we can leave it empty args.ctu_phases =", "# will re-assign the report directory as new output with", "missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),", "current = run(parameters) # display error message from the static", "cover report from the analyzer outputs. \"\"\" import re import", "the location for the build process. 
\"\"\" return len(args) and", "os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the", "name return opts['output_dir'] try: cwd = opts['directory'] cmd = get_arguments([opts['clang'],", "collections import defaultdict from libscanbuild import command_entry_point, compiler_wrapper, \\ wrapper_environment,", "tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write(\"\\n\".join(extdef_ast_list) + \"\\n\") cwd =", "'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'", "user asked for a collect (1st) and analyze (2nd) phase,", "frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) #", "else: logging.debug('skip analysis, found not supported arch') return None else:", "re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): pass # and consider", "= document(args) # Set exit status as it was requested.", "needed parameters from environment, crash when missing parameters = {", "when any excluded directory prefix the filename. \"\"\" if not", "the analysis command line and executes it. Capture the output", "'r') as in_file: for line in in_file: yield line def", "= CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False,", "error = 'crash' if opts['exit_code'] < 0 else 'other_error' #", "# compile option will be overwritten '-fsyntax-only': 0, # static", "'compiler': compiler_language(command) # 'c' or 'c++' } # iterate on", "get_ctu_config_from_args(args): \"\"\" CTU configuration is created from the chosen phases", "Filter out nondebug macros when requested. \"\"\" if opts.pop('force_debug'): #", "decomposed into smaller methods which are calling each other in", "Makefile. 
if args.intercept_first: # Run build command with intercept module.", "= (dict(cmd, **consts) for cmd in json.load(handle) if not exclude(", "if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point", "'exit_code': 0} except subprocess.CalledProcessError as ex: result = {'error_output': ex.output,", "shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs (collect or analyze) are", "is missing. \"\"\" def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for", "suppress ones # that we don't want to see. elif", "CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')", "exc_info=1) return None @require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', 'error_output',", "in range(count): next(args) # we don't care about extra warnings,", "# all-in-one run where we deliberately remove collection data before", "wrapper(*args, **kwargs): for key in required: if key not in", "decision also influenced by the compiler invocation. \"\"\" accepted =", "[a for a in received_list if a not in disabled]", "Entry point to run (or not) static analyzer against a", "with open(name + '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang',", "pool = multiprocessing.Pool(1 if args.verbose > 2 else None) for", "run against project configure step, it should be silent and", "'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): \"\"\" Find out the language", "will be overwritten '-o': 1, # will set up own", "macros when requested. \"\"\" if opts.pop('force_debug'): # lazy implementation just", "caller and the called method.) 
\"\"\" try: command = opts.pop('command')", "[opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug(\"Generating external definition map using '%s'\",", "logging.debug(\"Run analyzer against '%s'\", command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception:", "absolute since 'args.excludes' are absolute paths. filename = os.path.normpath(os.path.join(directory, filename))", "Set exit status as it was requested. return number_of_bugs if", "analyzer run against project configure step, it should be silent", "filename is either absolute or relative to directory. Need to", "continuation=dispatch_ctu): \"\"\" Filter out nondebug macros when requested. \"\"\" if", "database. \"\"\" def exclude(filename, directory): \"\"\" Return true when any", "individual external definition maps into a global one. As the", "static analyzer against a build is done in multiple steps:", "Intercept: capture the compilation command during the build, -- Analyze:" ]
[ "mid_left self.mid_split = mid_split self.mid_right = mid_right self.low_left = low_left", "u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560',", "pylint: disable=R0902 # pylint: disable=R0903 # pylint: disable=R0913 \"\"\" Définie", "'+',\\ '+', '+', '+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C',", "# pylint: disable=R0913 \"\"\" Définie la classe TableBorder \"\"\" class", "disable=C0103 # pylint: disable=R0902 # pylint: disable=R0903 # pylint: disable=R0913", "BORDERS = [TableBorder('+', '+', '+',\\ '+', '+', '+',\\ '+', '+',", "'+', '+', '+',\\ '+', '+', '+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C',", "u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\", "\"\"\" def __init__(self, top_left, top_split, top_right, mid_left, mid_split, mid_right, low_left,", "TableBorder: \"\"\" Facillite l'usage de l'UNICODE \"\"\" def __init__(self, top_left,", "l'UNICODE \"\"\" def __init__(self, top_left, top_split, top_right, mid_left, mid_split, mid_right,", "disable=R0913 \"\"\" Définie la classe TableBorder \"\"\" class TableBorder: \"\"\"", "l'usage de l'UNICODE \"\"\" def __init__(self, top_left, top_split, top_right, mid_left,", "'+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514',", "pylint: disable=R0903 # pylint: disable=R0913 \"\"\" Définie la classe TableBorder", "disable=R0903 # pylint: disable=R0913 \"\"\" Définie la classe TableBorder \"\"\"", "# pylint: disable=C0103 # pylint: disable=R0902 # pylint: disable=R0903 #", "TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a', u'\\u2569', u'\\u255d',\\ u'\\u2550',", "de l'UNICODE \"\"\" def __init__(self, top_left, top_split, top_right, mid_left, mid_split,", "Constructeur \"\"\" self.top_left 
= top_left self.top_split = top_split self.top_right =", "self.vertical = vertical BORDERS = [TableBorder('+', '+', '+',\\ '+', '+',", "= mid_left self.mid_split = mid_split self.mid_right = mid_right self.low_left =", "mid_right self.low_left = low_left self.low_split = low_split self.low_right = low_right", "self.mid_left = mid_left self.mid_split = mid_split self.mid_right = mid_right self.low_left", "u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C',", "u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554',", "pylint: disable=C0103 # pylint: disable=R0902 # pylint: disable=R0903 # pylint:", "self.low_right = low_right self.horizontal = horizontal self.vertical = vertical BORDERS", "horizontal self.vertical = vertical BORDERS = [TableBorder('+', '+', '+',\\ '+',", "self.top_split = top_split self.top_right = top_right self.mid_left = mid_left self.mid_split", "mid_split self.mid_right = mid_right self.low_left = low_left self.low_split = low_split", "mid_right, low_left, low_split, low_right, horizontal, vertical): \"\"\" Constructeur \"\"\" self.top_left", "'+', '+', '+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C',", "u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'),", "mid_split, mid_right, low_left, low_split, low_right, horizontal, vertical): \"\"\" Constructeur \"\"\"", "= top_left self.top_split = top_split self.top_right = top_right self.mid_left =", "self.mid_split = mid_split self.mid_right = mid_right self.low_left = low_left self.low_split", "top_split self.top_right = top_right self.mid_left = mid_left self.mid_split = mid_split", "TableBorder \"\"\" class TableBorder: \"\"\" Facillite l'usage de l'UNICODE \"\"\"", "horizontal, vertical): \"\"\" Constructeur \"\"\" 
self.top_left = top_left self.top_split =", "TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500',", "#!/usr/bin/env python3 # pylint: disable=C0103 # pylint: disable=R0902 # pylint:", "\"\"\" Facillite l'usage de l'UNICODE \"\"\" def __init__(self, top_left, top_split,", "disable=R0902 # pylint: disable=R0903 # pylint: disable=R0913 \"\"\" Définie la", "\"\"\" Constructeur \"\"\" self.top_left = top_left self.top_split = top_split self.top_right", "'+',\\ '+', '+', '+',\\ '+', '+', '+',\\ '-', '|'), TableBorder(u'\\u250c',", "self.low_split = low_split self.low_right = low_right self.horizontal = horizontal self.vertical", "u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a',", "'+', '+',\\ '+', '+', '+',\\ '+', '+', '+',\\ '-', '|'),", "= low_left self.low_split = low_split self.low_right = low_right self.horizontal =", "low_split, low_right, horizontal, vertical): \"\"\" Constructeur \"\"\" self.top_left = top_left", "'+', '+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\", "[TableBorder('+', '+', '+',\\ '+', '+', '+',\\ '+', '+', '+',\\ '-',", "Définie la classe TableBorder \"\"\" class TableBorder: \"\"\" Facillite l'usage", "low_left, low_split, low_right, horizontal, vertical): \"\"\" Constructeur \"\"\" self.top_left =", "\"\"\" class TableBorder: \"\"\" Facillite l'usage de l'UNICODE \"\"\" def", "= low_right self.horizontal = horizontal self.vertical = vertical BORDERS =", "u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566',", "la classe TableBorder \"\"\" class TableBorder: \"\"\" Facillite l'usage de", "top_split, top_right, mid_left, mid_split, mid_right, low_left, low_split, low_right, horizontal, vertical):", "= top_split self.top_right = 
top_right self.mid_left = mid_left self.mid_split =", "\"\"\" Définie la classe TableBorder \"\"\" class TableBorder: \"\"\" Facillite", "self.horizontal = horizontal self.vertical = vertical BORDERS = [TableBorder('+', '+',", "top_right self.mid_left = mid_left self.mid_split = mid_split self.mid_right = mid_right", "u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a', u'\\u2569', u'\\u255d',\\ u'\\u2550', u'\\u2551') ]", "u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a', u'\\u2569', u'\\u255d',\\ u'\\u2550', u'\\u2551')", "<gh_stars>0 #!/usr/bin/env python3 # pylint: disable=C0103 # pylint: disable=R0902 #", "self.top_right = top_right self.mid_left = mid_left self.mid_split = mid_split self.mid_right", "mid_left, mid_split, mid_right, low_left, low_split, low_right, horizontal, vertical): \"\"\" Constructeur", "low_split self.low_right = low_right self.horizontal = horizontal self.vertical = vertical", "class TableBorder: \"\"\" Facillite l'usage de l'UNICODE \"\"\" def __init__(self,", "low_left self.low_split = low_split self.low_right = low_right self.horizontal = horizontal", "python3 # pylint: disable=C0103 # pylint: disable=R0902 # pylint: disable=R0903", "self.low_left = low_left self.low_split = low_split self.low_right = low_right self.horizontal", "vertical BORDERS = [TableBorder('+', '+', '+',\\ '+', '+', '+',\\ '+',", "u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\ u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\", "top_left self.top_split = top_split self.top_right = top_right self.mid_left = mid_left", "pylint: disable=R0913 \"\"\" Définie la classe TableBorder \"\"\" class TableBorder:", "def __init__(self, top_left, top_split, top_right, mid_left, mid_split, mid_right, low_left, low_split,", "# pylint: disable=R0902 # pylint: disable=R0903 # pylint: disable=R0913 \"\"\"", "\"\"\" self.top_left = top_left self.top_split = top_split self.top_right = top_right", "= 
mid_split self.mid_right = mid_right self.low_left = low_left self.low_split =", "= low_split self.low_right = low_right self.horizontal = horizontal self.vertical =", "low_right self.horizontal = horizontal self.vertical = vertical BORDERS = [TableBorder('+',", "'|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534', u'\\u2518',\\", "self.mid_right = mid_right self.low_left = low_left self.low_split = low_split self.low_right", "top_right, mid_left, mid_split, mid_right, low_left, low_split, low_right, horizontal, vertical): \"\"\"", "'-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\ u'\\u251C', u'\\u253C', u'\\u2524',\\ u'\\u2514', u'\\u2534',", "= vertical BORDERS = [TableBorder('+', '+', '+',\\ '+', '+', '+',\\", "u'\\u2500', u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a', u'\\u2569',", "vertical): \"\"\" Constructeur \"\"\" self.top_left = top_left self.top_split = top_split", "= mid_right self.low_left = low_left self.low_split = low_split self.low_right =", "= horizontal self.vertical = vertical BORDERS = [TableBorder('+', '+', '+',\\", "= [TableBorder('+', '+', '+',\\ '+', '+', '+',\\ '+', '+', '+',\\", "u'\\u2502'), TableBorder(u'\\u2554', u'\\u2566', u'\\u2557',\\ u'\\u2560', u'\\u256C', u'\\u2563',\\ u'\\u255a', u'\\u2569', u'\\u255d',\\", "= top_right self.mid_left = mid_left self.mid_split = mid_split self.mid_right =", "self.top_left = top_left self.top_split = top_split self.top_right = top_right self.mid_left", "classe TableBorder \"\"\" class TableBorder: \"\"\" Facillite l'usage de l'UNICODE", "top_left, top_split, top_right, mid_left, mid_split, mid_right, low_left, low_split, low_right, horizontal,", "'+', '+',\\ '+', '+', '+',\\ '-', '|'), TableBorder(u'\\u250c', u'\\u252C', u'\\u2510',\\", "# pylint: disable=R0903 # pylint: disable=R0913 \"\"\" Définie la classe", "__init__(self, top_left, top_split, 
top_right, mid_left, mid_split, mid_right, low_left, low_split, low_right,", "Facillite l'usage de l'UNICODE \"\"\" def __init__(self, top_left, top_split, top_right,", "low_right, horizontal, vertical): \"\"\" Constructeur \"\"\" self.top_left = top_left self.top_split" ]
[ "<gh_stars>0 from django.conf.urls import url from . import views urlpatterns", "urlpatterns = [ url(r'^register/', views.register), url(r'^login/', views.login), url(r'logout/', views.logout), url(r'search/',", "= [ url(r'^register/', views.register), url(r'^login/', views.login), url(r'logout/', views.logout), url(r'search/', views.search)", "from django.conf.urls import url from . import views urlpatterns =", "url from . import views urlpatterns = [ url(r'^register/', views.register),", "from . import views urlpatterns = [ url(r'^register/', views.register), url(r'^login/',", "import url from . import views urlpatterns = [ url(r'^register/',", "views urlpatterns = [ url(r'^register/', views.register), url(r'^login/', views.login), url(r'logout/', views.logout),", ". import views urlpatterns = [ url(r'^register/', views.register), url(r'^login/', views.login),", "django.conf.urls import url from . import views urlpatterns = [", "[ url(r'^register/', views.register), url(r'^login/', views.login), url(r'logout/', views.logout), url(r'search/', views.search) ]", "import views urlpatterns = [ url(r'^register/', views.register), url(r'^login/', views.login), url(r'logout/'," ]
[ "'HASL Version' self._version = __version__ self._py_version = self._haslapi.version() @property def", "of the sensor.\"\"\" return self._version + \"/\" + self._py_version class", "'ri2_' + ri4key + '_' + siteid self._hass = hass", "# If the sensor should return minutes to next departure.", "self._version, 'pyHasl': self._py_version} @property def state(self): \"\"\" Return the state", "import logging from datetime import timedelta import homeassistant.helpers.config_validation as cv", "at which next departure occurs. if self._sensorproperty is 'time': if", "CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status',", "si2key: self._si2key = si2key self._si2api = si2api(si2key, siteid, '') self._si2datakey", "%s...', self._name) except HASL_Error as e: _LOGGER.error(\"A communication error occured", "__version__ = '2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN = 'hasl' #", "+ int(s[1]) - (rightnow.hour * 60 + rightnow.minute) if min", "as e: _LOGGER.error(\"A error occured while retreiving \" \"cached SI2", "and update sensor data. newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated'] =", "'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES = ['departures',", "and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey,", "def parseDepartureTime(self, t): \"\"\" weird time formats from the API,", "or sensor_state.state is STATE_ON: return STATE_ON return STATE_OFF if self._sensorproperty", "{} # Use some nice translations for the statuses etc.", "self._name) except HASL_Error as e: _LOGGER.error(\"A communication error occured while", "measure. 
unit_table = { 'min': 'min', 'time': '', 'deviations': '',", "STATE_ON: val['refresh_enabled'] = STATE_ON else: val['refresh_enabled'] = STATE_OFF # Set", "_update_ri4(self): errorOccured = False _LOGGER.info(\"Starting to update RI4 for %s...\",", "True else: try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache", "SI2 sensor: %s\", e.details) errorOccured = True if not errorOccured:", "sitekey = sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if", "= \\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] = \\ statusIcons.get(response['StatusIcon']) newdata[statustype", "return if it is updating or not. if self._sensorproperty is", "Exception as e: _LOGGER.error(\"A error occured while \" \"updating SI2", "is '-': return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return", "Return the state of the sensor.\"\"\" return self._version + \"/\"", "== 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME]", "= departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache", "_LOGGER.info(\"Starting to update RI4 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey]", "the sensor attributes.\"\"\" return {'type': self._train_type, 'data': json.dumps(self._data)} @property def", "_LOGGER.error(\"A error occured while retreiving \" \"cached RI4 sensor data:", "as e: _LOGGER.error(\"A error occured while \" \"updating SI2 sensor:", "\"\"\" Return the icon for the frontend.\"\"\" if self._deviations_table: return", "@property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return {'hasl':", "self._enabled_sensor is None or sensor_state.state is STATE_ON: return STATE_ON return", "'metro': 'mdi:subway-variant' } # 
If the same API have already", "the frontend.\"\"\" return None @property def device_state_attributes(self): \"\"\" Return the", "'': value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self):", "now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name) except HASL_Error as e:", "attributes. val = {} # Format the next exptected time.", "== 'status' or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key = config.get(CONF_TL2_KEY)", "'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table", "min + 1440 return min except Exception: _LOGGER.warning(\"Failed to parse", "vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY):", "config, add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN]", "else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache for %s...\",", "is ON then proceed. if self._enabled_sensor is None or sensor_state.state", "'-' return self._departure_table[0]['time'] # If the sensor should return the", "siteid, lines, friendly_name, enabled_sensor, interval, direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\"", "'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons = { 'ferry': 'mdi:ferry', 'bus': 'mdi:bus',", "to the next departure \"\"\" # If the sensor should", "key + '': value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close()", "CONF_SITEID = 'siteid' CONF_LINES = 'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR", "Set values of the sensor. 
val['attribution'] = 'Stockholms Lokaltrafik' val['departures']", "= False _LOGGER.info(\"Starting to update RI4 for %s...\", self._name) cacheage", "['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3'] #", "or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) == 0", "= self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval", "siteid, '') self._si2datakey = 'si2_' + si2key + '_' +", "Initialize the state attributes. val = {} # Format the", "config.get(CONF_RI4_KEY) if sitekey and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass,", "STATE_ON) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change,", "not '': val['unit_of_measurement'] = self._unit_of_measure # Check if sensor is", "'-' # Format the last refresh time. refresh = self._lastupdate", "= value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or '' icon", "sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing tl2key attribute\",", "cacheage or not self._minimization: try: departuredata = self._ri4api.request() departuredata =", "the sensor.\"\"\" return self._lastupdate def getCache(self, key): try: jsonFile =", "_LOGGER.error(\"A error occured while retreiving \" \"cached SI2 sensor: %s\",", "if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL),", "parseDepartureTime(self, t): \"\"\" weird time formats from the API, do", "hass): self._hass = hass self._haslapi = haslapi() self._name = 'HASL", "if sitekey and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key,", "the state of the sensor.\"\"\" return self._lastupdate def getCache(self, key):", "sensorconf.get(CONF_SCAN_INTERVAL), 
sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor %s...\",", "self._enabled_sensor is None or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to update", "__init__(self, hass, tl2key, friendly_name, enabled_sensor, interval, type, minimization): self._tl2api =", "to next departure. if self._sensorproperty is 'min': if not self._departure_table:", "departure time (%s) \", t) return 0 def getCache(self, key):", "expected_time = self._departure_table[0]['expected'] or '-' expected_minutes = self._departure_table[0]['time'] or '-'", "'refresh': '', 'update': '', } if si2key: self._si2key = si2key", "(haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ =", "'min': return int(s[0]) s = t.split(':') if len(s) > 1:", "'_' + siteid self._hass = hass self._name = friendly_name self._lines", "RI4 sensor data: %s\", e) errorOccured = True if not", "vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass,", "LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min',", "state(self): \"\"\" Return the state of the sensor.\"\"\" return self._lastupdate", "= \\ sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2 update completed for", "self._sensorproperty is 'min': if not self._departure_table: return '-' return self._departure_table[0]['time']", "'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3'] # Default values for", "import datetime import json import logging from datetime import timedelta", "is STATE_ON: self._update_ri4() if self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def", "'r') data = json.load(jsonFile) jsonFile.close() return data.get(key) except: return 
{}", "DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE =", "CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID", "rightnow = now(self._hass.config.time_zone) min = int(s[0]) * 60 + int(s[1])", "if len(s) > 1: rightnow = now(self._hass.config.time_zone) min = int(s[0])", "voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import", "sensor_state = self._hass.states.get(self._enabled_sensor) # If we dont have external sensor", "error occured while \" \"updating SI2 sensor: %s\", e.details) errorOccured", "Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name of the sensor.\"\"\"", "the state of the sensor.\"\"\" return self._train_type def _update(self): if", "LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT',", "or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to update TL2 for %s...\",", "as e: _LOGGER.error(\"A error occured while retreiving \" \"cached RI4", "while retreiving \" \"cached RI4 sensor data: %s\", e) errorOccured", "self._unit_of_measure # Check if sensor is currently updating or not.", "if self._enabled_sensor is None or sensor_state.state is STATE_ON: return STATE_ON", "sensor or it is ON then proceed. 
if self._enabled_sensor is", "'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon", "= sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION)", "'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } #", "= True if not errorOccured: deviations = [] for (idx,", "SLDeparturesSensor(Entity): \"\"\"Departure board for one SL site.\"\"\" def __init__(self, hass,", "for SL (Storstockholms Lokaltrafik).\"\"\" import datetime import json import logging", "= {} # Use some nice translations for the statuses", "[] iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry',", "= '' if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = ''", "Keys used in the configuration. CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY =", "next exptected time. if self._departure_table: expected_time = self._departure_table[0]['expected'] or '-'", "== 'comb': sitekey = sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key =", "+ tl2key self._interval = interval self._hass = hass self._name =", "if self._sensorproperty is 'time': if not self._departure_table: return '-' expected", "= [] iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships':", "= timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY =", "hass.data[DOMAIN][self._datakey] = '' self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return", "val['deviation_count'] = len(self._deviations_table) return val def parseDepartureTime(self, t): \"\"\" weird", "occurs. 
if self._sensorproperty is 'time': if not self._departure_table: return '-'", "self._minimization = minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update", "time. if self._departure_table: expected_time = self._departure_table[0]['expected'] or '-' expected_minutes =", "some quick and dirty conversions. \"\"\" try: if t ==", "results. for response in apidata: type = response['Type'] if self._type", "not self._minimization: try: departuredata = self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey,", "self._name) except Exception as e: _LOGGER.error(\"A error occured while retreiving", "def device_state_attributes(self): \"\"\" Return the sensor attributes .\"\"\" # Initialize", "interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization =", "translations for the statuses etc. statuses = { 'EventGood': 'Good',", "def state(self): \"\"\" Return number of minutes to the next", "len(self._deviations_table) # If the sensor should return if it is", "our object. 
newdata = {} # Use some nice translations", "now(self._hass.config.time_zone) min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour", "'groupofline': groupofline, 'icon': icon, }) self._departure_table = sorted(departures, key=lambda k:", "e: _LOGGER.error(\"A error occured while \" \"updating SI2 sensor: %s\",", "except: data = {'' + key + '': value} jsonFile", "enabled_sensor self._sensorproperty = sensorproperty self._departure_table = [] self._deviations_table = []", "site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status' or", "'sortOrder': value['SortOrder'], }) self._deviations_table = \\ sorted(deviations, key=lambda k: k['sortOrder'])", "sensors = [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for", "is 'updated': if self._lastupdate is '-': return '-' return refresh.strftime('%Y-%m-%d", "expected = value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or ''", "in within # the specified interval then use that data", ")) _LOGGER.info(\"Created departures sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is", "= '0' self._nextdeparture_expected = '-' self._lastupdate = '-' self._interval =", "and the corresponding units of measure. 
unit_table = { 'min':", "trafficTypeIcons = { 'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train':", "'siteid' CONF_LINES = 'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR = 'sensor'", "'-' self._lastupdate = '-' self._interval = interval self._unit_of_measure = unit_table.get(self._sensorproperty,", "default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE):", "'min': 'min', 'time': '', 'deviations': '', 'refresh': '', 'update': '',", "%s is missing site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE]", "enabled_sensor self._type = type self._sensordata = [] self._lastupdate = '-'", "0 def getCache(self, key): try: jsonFile = open(self._cachefile, 'r') data", "self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or now(self._hass.config.time_zone) \\", "'' destination = value['Destination'] or '' linenumber = value['LineNumber'] or", "= 'HASL Version' self._version = __version__ self._py_version = self._haslapi.version() @property", "0 displaytime = value['DisplayTime'] or '' destination = value['Destination'] or", "the frontend.\"\"\" return 'mdi:train-car' @property def device_state_attributes(self): \"\"\" Return the", "sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors = []", "= interval self._enabled_sensor = enabled_sensor self._train_type = train_type self._data =", "of minutes to the next departure \"\"\" # If the", "Check if sensor is currently updating or not. 
if self._enabled_sensor", "\\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey = sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY)", "return 'mdi:train-car' @property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\"", "API have already made the request in within # the", "= deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache", "'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3'] # Default", "in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\ sensorconf[CONF_SENSOR_TYPE] ==", "datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else: expected_time = '-' expected_minutes", "type, minimization): self._tl2api = tl2api(tl2key) self._datakey = 'tl2_' + tl2key", "sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train", "+ \"/\" + self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def", "_LOGGER.info(\"Reusing data from cache for %s...\", self._name) except Exception as", "Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean,", "extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\" if", "return expected # If the sensor should return the number", "errorOccured: deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({", "occured while \" \"updating train location sensor: %s\", e.details) return", "direction self._timewindow = timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected = '-'", "jsonFile = 
open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() data[key] =", "hass self._name = friendly_name self._lines = lines self._siteid = siteid", "is 'refresh': if self._enabled_sensor is None or sensor_state.state is STATE_ON:", "'hasl' # Keys used in the configuration. CONF_RI4_KEY = 'ri4key'", "# the specified interval then use that data instead of", "int(self._direction) == 0 or int(direction) \\ == int(self._direction): if self._lines", "int(s[0]) s = t.split(':') if len(s) > 1: rightnow =", "= logging.getLogger(__name__) DOMAIN = 'hasl' # Keys used in the", "if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = '' # Setup", "# If the sensor should return the number of deviations.", "default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices, discovery_info=None):", "'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb',", "self._ri4key = ri4key self._ri4api = ri4api(ri4key, siteid, 60) self._ri4datakey =", "val['last_refresh'] = refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time'] = expected_time val['deviation_count']", "+ 1440 return min except Exception: _LOGGER.warning(\"Failed to parse departure", "tl2key = config.get(CONF_TL2_KEY) if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass,", "of deviations. if self._sensorproperty is 'deviations': return len(self._deviations_table) # If", "= self._unit_of_measure # Check if sensor is currently updating or", "import now from hasl import (haslapi, fpapi, tl2api, ri4api, si2api,", "'': val['unit_of_measurement'] = self._unit_of_measure # Check if sensor is currently", "the sensor should return minutes to next departure. 
if self._sensorproperty", "error occured while \" \"updating train location sensor: %s\", e.details)", "then use that data instead of # requesting it again", "enabled_sensor, interval, direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" # The table", "attributes.\"\"\" return self._sensordata @property def state(self): \"\"\" Return the state", "def __init__(self, hass, friendly_name, train_type, interval, enabled_sensor): self._hass = hass", "discovery_info=None): \"\"\"Setup the sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN] = {}", "= minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key:", "logging from datetime import timedelta import homeassistant.helpers.config_validation as cv import", "= self._departure_table[0]['expected'] or '-' expected_minutes = self._departure_table[0]['time'] or '-' if", "_LOGGER.info(\"Starting to update TL2 for %s...\", self._name) # Object used", "%s...\", self._name) def _update_si2(self): errorOccured = False _LOGGER.info(\"Starting to update", "def _update(self): if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor)", "if it is updating or not. 
if self._sensorproperty is 'refresh':", "if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname", "\"updating SI2 sensor: %s\", e) errorOccured = True else: try:", "if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = '' # Setup updating of", "self._departure_table[0]['time'] # If the sensor should return the time at", "in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value) in", "for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for", "self._departure_table[0]['expected'] or '-' expected_minutes = self._departure_table[0]['time'] or '-' if expected_time", "'time': '', 'deviations': '', 'refresh': '', 'update': '', } if", "try: apidata = self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey]", "int(direction) \\ == int(self._direction): if self._lines == [] or linenumber", "if int(self._direction) == 0 or int(direction) \\ == int(self._direction): if", "data. 
newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' +", "self._update_ri4() if self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self): errorOccured", "{ 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', }", "= 'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local',", "if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update = Throttle(interval)(self._update) @property", "hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] =", "= newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update completed for %s...\",", "while \" \"updating TL4 API: %s\", e) return else: apidata", "self._enabled_sensor = enabled_sensor self._sensorproperty = sensorproperty self._departure_table = [] self._deviations_table", "friendly_name self._enabled_sensor = enabled_sensor self._type = type self._sensordata = []", "we dont have external sensor or it is ON then", "self._hass = hass self._haslapi = haslapi() self._name = 'HASL Version'", "min = min + 1440 return min except Exception: _LOGGER.warning(\"Failed", "SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, tl2key, friendly_name, enabled_sensor,", "= refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the unit of measure. if", "the sensor attributes .\"\"\" # Initialize the state attributes. 
val", "errorOccured = False _LOGGER.info(\"Starting to update RI4 for %s...\", self._name)", "\"\"\"Initialize\"\"\" # The table of resulttypes and the corresponding units", "sensor: %s\", e) return self._data = apidata _LOGGER.info(\"Update completed %s...\",", "= '2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN = 'hasl' # Keys", "\"\"\" Return the sensor attributes .\"\"\" # Initialize the state", "self._name = friendly_name self._interval = interval self._enabled_sensor = enabled_sensor self._train_type", "else type) newdata[statustype + '_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype +", "'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization'", "not errorOccured: departures = [] iconswitcher = { 'Buses': 'mdi:bus',", "SI2 sensor: %s\", e) errorOccured = True else: try: deviationdata", "CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION", "self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self): errorOccured = False", "self._sensorproperty is 'refresh': if self._enabled_sensor is None or sensor_state.state is", "config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s", ".\"\"\" # Initialize the state attributes. val = {} #", "response['Events'] # Attribution and update sensor data. 
newdata['attribution'] = \"Stockholms", "} trafficTypeIcons = { 'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram',", "(i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx,", "Situation Sensor.\"\"\" def __init__(self, hass, friendly_name, train_type, interval, enabled_sensor): self._hass", "self._minimization: try: apidata = self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata)", "# Default values for configuration. DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW =", "[vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config,", "__init__(self, hass, si2key, ri4key, siteid, lines, friendly_name, enabled_sensor, interval, direction,", "'mdi:triangle-outline' } trafficTypeIcons = { 'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram':", "in the configuration. CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY", "sensorname) else: _LOGGER.error(\"Sensor %s is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors)", "= \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else: expected_time =", "jsonFile.close() def _update(self): \"\"\"Get the departure board.\"\"\" # If using", "as e: _LOGGER.error(\"A communication error occured while \" \"updating RI4", "credits from dying. cacheage = self._hass.data[DOMAIN][self._datakey] if not cacheage or", "import timedelta import homeassistant.helpers.config_validation as cv import voluptuous as vol", "not cacheage or now(self._hass.config.time_zone) \\ - self._interval > cacheage or", "the configuration schema. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY):", "\"\"\"Get the departure board.\"\"\" # If using external sensor, get", "to create our object. 
newdata = {} # Use some", "jsonFile.close() return data.get(key) except: return {} def putCache(self, key, value):", "\"\"\" Return the icon for the frontend.\"\"\" return 'mdi:train-car' @property", "the sensor. self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the", "return None @property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\"", "vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID):", "if expected_time is not '-': expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S')", "'%H:%M:%S') self._sensordata = newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update completed", "self._sensorproperty is 'deviations': return len(self._deviations_table) # If the sensor should", "missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type =", "self._hass.data[DOMAIN][self._datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval >", "vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE,", "\" \"updating TL2 sensor: %s\", e.details) return except Exception as", "= type self._sensordata = [] self._lastupdate = '-' self._cachefile =", "departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated", "'Stockholms Lokaltrafik' val['departures'] = self._departure_table val['deviations'] = self._deviations_table 
val['last_refresh'] =", "if not self._departure_table: return '-' expected = self._departure_table[0]['expected'] or '-'", "is 'deviations': return len(self._deviations_table) # If the sensor should return", "STATE_ON: return STATE_ON return STATE_OFF if self._sensorproperty is 'updated': if", "return 0 def getCache(self, key): try: jsonFile = open(self._cachefile, 'r')", "departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name)", "self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time'] = expected_time", "ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES),", "\"\"\" weird time formats from the API, do some quick", "self._nextdeparture_expected = '-' self._lastupdate = '-' self._interval = interval self._unit_of_measure", "} for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']):", "if not errorOccured: departures = [] iconswitcher = { 'Buses':", "sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), ))", "\"\"\" Return the state of the sensor.\"\"\" return self._train_type def", "self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey]", "groupofline, 'icon': icon, }) self._departure_table = sorted(departures, key=lambda k: k['time'])", "self._departure_table: return '-' expected = self._departure_table[0]['expected'] or '-' if expected", "If the sensor should return if it is updating or", "e) return self._data = apidata _LOGGER.info(\"Update completed %s...\", self._name) class", "\\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] = \\ 
statusIcons.get(response['StatusIcon']) newdata[statustype +", "= [] self._direction = direction self._timewindow = timewindow self._nextdeparture_minutes =", "sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor %s...\", sensorname) else:", "= si2key self._si2api = si2api(si2key, siteid, '') self._si2datakey = 'si2_'", "= self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time'] =", "for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'],", "'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'],", "_LOGGER.error(\"Sensor %s is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity):", "= apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache", "errorOccured = False _LOGGER.info(\"Starting to update SI2 for %s...\", self._name)", "response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype +", "e) errorOccured = True else: try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing", "= sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if sitekey", "hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey]", "for HomeAssistant. 
statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor':", "self._departure_table[0]['expected'] or '-' if expected is not '-': expected =", "# If using external sensor, get its value. if self._enabled_sensor", "hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor %s...\",", "= config.get(CONF_TL2_KEY) if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key,", "for %s...\", self._name) # Return only the relevant portion of", "open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if self._enabled_sensor is not", "%s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or now(self._hass.config.time_zone)", "occured while \" \"updating SI2 sensor: %s\", e) errorOccured =", "import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util import Throttle from homeassistant.util.dt", "external sensor, get its value. 
if self._enabled_sensor is not None:", "departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name)", "sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2 update completed for %s...\", self._name)", "sensor data: %s\", e) errorOccured = True if not errorOccured:", "expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return expected", "return STATE_ON return STATE_OFF if self._sensorproperty is 'updated': if self._lastupdate", "or '-' if expected_time is not '-': expected_time = \\", "+ self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass,", "'0' self._nextdeparture_expected = '-' self._lastupdate = '-' self._interval = interval", "\"\"\"Return the name of the sensor.\"\"\" return self._name @property def", "'-' @property def device_state_attributes(self): \"\"\" Return the sensor attributes .\"\"\"", "vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA)", "self._type: statustype = ('ferry' if type == 'fer' else type)", "hass self._fpapi = fpapi() self._name = friendly_name self._interval = interval", "= value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or '' destination", "default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list,", "if self._type is None or type in self._type: statustype =", "import json import logging from datetime import timedelta import homeassistant.helpers.config_validation", "= { 'EventGood': 'Good', 
'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned',", "value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], })", "- (rightnow.hour * 60 + rightnow.minute) if min < 0:", "'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE = 'train_type'", "sensor attributes .\"\"\" # Initialize the state attributes. val =", "CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION", "occured while retreiving \" \"cached RI4 sensor data: %s\", e)", "'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons = { 'ferry': 'mdi:ferry',", "for the frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus' @property", "cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS,", "Exception as e: _LOGGER.error(\"A communication error occured while \" \"updating", "[] or linenumber \\ in self._lines: diff = self.parseDepartureTime(displaytime) if", "== 0 or int(direction) \\ == int(self._direction): if self._lines ==", "not errorOccured: deviations = [] for (idx, value) in enumerate(deviationdata):", "apidata _LOGGER.info(\"Update completed %s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\"", "'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro':", "ri4key + '_' + siteid self._hass = hass self._name =", "Failsafe return '-' @property def device_state_attributes(self): \"\"\" Return the sensor", "= direction self._timewindow = timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected =", "enabled_sensor, interval, type, minimization): self._tl2api = tl2api(tl2key) self._datakey = 'tl2_'", 
"= \"Stockholms Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata", "= value['DisplayTime'] or '' destination = value['Destination'] or '' linenumber", "expected_time = expected_time.strftime('%H:%M:%S') else: expected_time = '-' expected_minutes = '-'", "'TB2', 'TB3'] # Default values for configuration. DEFAULT_INTERVAL = timedelta(minutes=10)", "type = response['Type'] if self._type is None or type in", "import (haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__", "train_type, interval, enabled_sensor): self._hass = hass self._fpapi = fpapi() self._name", "# Icon table used for HomeAssistant. statusIcons = { 'EventGood':", "if type == 'fer' else type) newdata[statustype + '_status'] =", "'%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return expected # If the sensor", "for %s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board for one SL", "from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval)", "expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else: expected_time", "self._siteid = siteid self._enabled_sensor = enabled_sensor self._sensorproperty = sensorproperty self._departure_table", "@property def state(self): \"\"\" Return the state of the sensor.\"\"\"", "SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, friendly_name, train_type, interval,", "frontend.\"\"\" return None @property def device_state_attributes(self): \"\"\" Return the sensor", "'Closed', 'EventPlanned': 'Planned', } # Icon table used for HomeAssistant.", "configuration. 
CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY = 'tl2key'", "to parse departure time (%s) \", t) return 0 def", "sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor %s...\",", "self._train_type = train_type self._data = {} self.update = Throttle(interval)(self._update) @property", "'data': json.dumps(self._data)} @property def state(self): \"\"\" Return the state of", "= { 'min': 'min', 'time': '', 'deviations': '', 'refresh': '',", "now from hasl import (haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error,", "= sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR),", "in self._type: statustype = ('ferry' if type == 'fer' else", "0: min = min + 1440 return min except Exception:", "= unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if", "# Defining the configuration schema. 
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API", "not self._departure_table: return '-' return self._departure_table[0]['time'] # If the sensor", "or not self._minimization: try: deviationdata = self._si2api.request() deviationdata = deviationdata['ResponseData']", "= 'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS =", "attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if", "expected_time is not '-': expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time", "deviationdata = self._si2api.request() deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] =", "sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor %s...\", sensorname)", "# API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION,", "Exception as e: _LOGGER.error(\"A error occured while \" \"updating TL4", "cache for %s...\", self._name) except HASL_Error as e: _LOGGER.error(\"A communication", "= response['Events'] # Attribution and update sensor data. 
newdata['attribution'] =", "the next departure \"\"\" # If the sensor should return", "'departure': displaytime, 'destination': destination, 'time': diff, 'expected': expected, 'type': traffictype,", "[cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0,", "\\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata = newdata self._lastupdate = newdata['last_updated']", "}) self._departure_table = sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4 update completed", "vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int),", "datetime import timedelta import homeassistant.helpers.config_validation as cv import voluptuous as", "Setup updating of the sensor. self.update = Throttle(interval)(self._update) @property def", "= self._departure_table[0]['expected'] or '-' if expected is not '-': expected", "self._train_type, 'data': json.dumps(self._data)} @property def state(self): \"\"\" Return the state", "< 0: min = min + 1440 return min except", "\\ - self._interval > cacheage or not self._minimization: try: departuredata", "Return the state of the sensor.\"\"\" return self._train_type def _update(self):", "which next departure occurs. if self._sensorproperty is 'time': if not", "%H:%M:%S') # Setup the unit of measure. if self._unit_of_measure is", "= '-' expected_minutes = '-' # Format the last refresh", "[] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title':", "\"\"\" Return the state of the sensor.\"\"\" return self._lastupdate def", "dirty conversions. 
\"\"\" try: if t == 'Nu': return 0", "destination = value['Destination'] or '' linenumber = value['LineNumber'] or ''", "sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor", "trafficTypeIcons.get(statustype) for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] =", "'tl2_' + tl2key self._interval = interval self._hass = hass self._name", "the API, do some quick and dirty conversions. \"\"\" try:", "= {} sensors = [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version", "= 'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer']", "corresponding units of measure. unit_table = { 'min': 'min', 'time':", "vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES),", "if self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self): errorOccured =", "return '-' return self._departure_table[0]['time'] # If the sensor should return", "not hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors = [] if config[CONF_VERSION]:", "\\ in self._lines: diff = self.parseDepartureTime(displaytime) if diff < self._timewindow:", "hass, friendly_name, train_type, interval, enabled_sensor): self._hass = hass self._fpapi =", "# Use some nice translations for the statuses etc. 
statuses", "the sensor should return the time at which next departure", "jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() return data.get(key)", "is 'min': if not self._departure_table: return '-' return self._departure_table[0]['time'] #", "self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self): errorOccured = False _LOGGER.info(\"Starting", "ON then proceed. if self._enabled_sensor is None or sensor_state.state \\", "weird time formats from the API, do some quick and", "= 'Stockholms Lokaltrafik' val['departures'] = self._departure_table val['deviations'] = self._deviations_table val['last_refresh']", "homeassistant.util import Throttle from homeassistant.util.dt import now from hasl import", "{ 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' }", "self._datakey = 'tl2_' + tl2key self._interval = interval self._hass =", "Return the icon for the frontend.\"\"\" return None @property def", "only the relevant portion of the results. for response in", "errorOccured = True if not errorOccured: departures = [] iconswitcher", "CONF_SI2_KEY = 'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID = 'siteid' CONF_LINES", "SI2 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if not cacheage", "# Set values of the sensor. 
val['attribution'] = 'Stockholms Lokaltrafik'", "-*- \"\"\"Simple service for SL (Storstockholms Lokaltrafik).\"\"\" import datetime import", "'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons", "_LOGGER.error(\"Sensor %s is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] ==", "self._departure_table[0]['time'] or '-' if expected_time is not '-': expected_time =", "hass, si2key, ri4key, siteid, lines, friendly_name, enabled_sensor, interval, direction, timewindow,", "@property def state(self): \"\"\" Return number of minutes to the", "'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID = 'siteid' CONF_LINES = 'lines'", "'type': traffictype, 'groupofline': groupofline, 'icon': icon, }) self._departure_table = sorted(departures,", "the results. for response in apidata: type = response['Type'] if", "Return the icon for the frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert'", "event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events']", "request in within # the specified interval then use that", "default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME):", "_LOGGER = logging.getLogger(__name__) DOMAIN = 'hasl' # Keys used in", "timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY = 'min'", "\\ - self._interval > cacheage or not self._minimization: try: apidata", "vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION,", "self.parseDepartureTime(displaytime) if diff < self._timewindow: departures.append({ 
'line': linenumber, 'direction': direction,", "statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] = \\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon']", "exptected time. if self._departure_table: expected_time = self._departure_table[0]['expected'] or '-' expected_minutes", "sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION),", "or not. if self._sensorproperty is 'refresh': if self._enabled_sensor is None", "'mdi:subway-variant' } # If the same API have already made", "= \\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] = \\ trafficTypeIcons.get(statustype) for", "jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if self._enabled_sensor", "Sensor.\"\"\" def __init__(self, hass, friendly_name, train_type, interval, enabled_sensor): self._hass =", "rightnow.minute) if min < 0: min = min + 1440", "- self._interval > cacheage or not self._minimization: try: deviationdata =", "self._name = friendly_name self._lines = lines self._siteid = siteid self._enabled_sensor", "cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True):", "as e: _LOGGER.error(\"A communication error occured while \" \"updating SI2", "vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES)", "len(s) > 1: rightnow = now(self._hass.config.time_zone) min = int(s[0]) *", "vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): 
vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS):", "use that data instead of # requesting it again and", "sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing site, si2key", "type == 'fer' else type) newdata[statustype + '_status'] = \\", "json import logging from datetime import timedelta import homeassistant.helpers.config_validation as", "== int(self._direction): if self._lines == [] or linenumber \\ in", "hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = ''", "= sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4 update completed for %s...\",", "return self._data = apidata _LOGGER.info(\"Update completed %s...\", self._name) class SLVersionSensor(Entity):", "value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or '' icon =", "datetime import json import logging from datetime import timedelta import", "+ '_icon'] = \\ trafficTypeIcons.get(statustype) for event in response['Events']: event['Status']", "self._sensordata @property def state(self): \"\"\" Return the state of the", "open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() return data.get(key) except: return", "self._lines: diff = self.parseDepartureTime(displaytime) if diff < self._timewindow: departures.append({ 'line':", "_LOGGER.info(\"Created version sensor for HASL\") for sensorconf in config[CONF_SENSORS]: if", "statuses etc. 
statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor':", "self._si2api.request() deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone)", "_LOGGER.error(\"A error occured while \" \"updating SI2 sensor: %s\", e)", "as e: _LOGGER.error(\"A communication error occured while \" \"updating TL2", "relevant portion of the results. for response in apidata: type", "= minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update =", "if not hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors = [] if", "PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string,", "= open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() return data.get(key) except:", "self._hass.data[DOMAIN][self._si2datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval >", "data = json.load(jsonFile) jsonFile.close() data[key] = value except: data =", "while retreiving \" \"cached SI2 sensor: %s\", e.details) errorOccured =", "self._data = apidata _LOGGER.info(\"Update completed %s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL", "= expected_time.strftime('%H:%M:%S') else: expected_time = '-' expected_minutes = '-' #", "def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return {'hasl': self._version,", "from dying. 
cacheage = self._hass.data[DOMAIN][self._datakey] if not cacheage or now(self._hass.config.time_zone)", "%s...\", self._name) except HASL_Error as e: _LOGGER.error(\"A communication error occured", "\"\"\"Setup the sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors", "== 'min': return int(s[0]) s = t.split(':') if len(s) >", "[] self._direction = direction self._timewindow = timewindow self._nextdeparture_minutes = '0'", "\"\"\" Return the sensor attributes.\"\"\" return {'type': self._train_type, 'data': json.dumps(self._data)}", "def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return self._sensordata @property", "def setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\" if not", "Return the sensor attributes.\"\"\" return {'hasl': self._version, 'pyHasl': self._py_version} @property", "is not '-': expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time =", "If the same API have already made the request in", "deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated", "\"\"\" Return the sensor attributes.\"\"\" return {'hasl': self._version, 'pyHasl': self._py_version}", "homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity", "timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" # The table of resulttypes and", "or not self._minimization: try: departuredata = self._ri4api.request() departuredata = departuredata['ResponseData']", "Attribution and update sensor data. 
newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated']", "if self._enabled_sensor is None or sensor_state.state is STATE_ON: val['refresh_enabled'] =", "max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS,", "homeassistant.helpers.config_validation as cv import voluptuous as vol from homeassistant.components.sensor import", "None or sensor_state.state is STATE_ON: try: apidata = self._fpapi.request(self._train_type) except", "to update RI4 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if", "RI4 API: %s\", e) errorOccured = True else: try: departuredata", "{} def putCache(self, key, value): try: jsonFile = open(self._cachefile, 'r')", "the unit of measure. if self._unit_of_measure is not '': val['unit_of_measurement']", "dont have external sensor or it is ON then proceed.", "or '-' expected_minutes = self._departure_table[0]['time'] or '-' if expected_time is", "self._minimization = minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if", "have already made the request in within # the specified", "from datetime import timedelta import homeassistant.helpers.config_validation as cv import voluptuous", "= {} # Format the next exptected time. 
if self._departure_table:", "or '' expected = value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine']", "minutes to the next departure \"\"\" # If the sensor", "'deviations': '', 'refresh': '', 'update': '', } if si2key: self._si2key", "putCache(self, key, value): try: jsonFile = open(self._cachefile, 'r') data =", "self._departure_table val['deviations'] = self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes'] = expected_minutes", "}) self._deviations_table = \\ sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2 update", "used for HomeAssistant. statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline',", "self._name = 'HASL Version' self._version = __version__ self._py_version = self._haslapi.version()", "used to create our object. newdata = {} # Use", "None @property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return", "self._si2api = si2api(si2key, siteid, '') self._si2datakey = 'si2_' + si2key", "30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE = 'PT'", "tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER", "_LOGGER.error(\"A communication error occured while \" \"updating train location sensor:", "= json.load(jsonFile) jsonFile.close() data[key] = value except: data = {''", "'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3'] # Default values", "the sensor.\"\"\" return self._version + \"/\" + self._py_version class SLStatusSensor(Entity):", "self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state is STATE_ON: try:", "refresh time. 
refresh = self._lastupdate if self._lastupdate is not '-':", "train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR),", "e) errorOccured = True else: try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing", "for (idx, value) in enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or 0", "return '-' expected = self._departure_table[0]['expected'] or '-' if expected is", "homeassistant.util.dt import now from hasl import (haslapi, fpapi, tl2api, ri4api,", "update completed for %s...\", self._name) def _update_si2(self): errorOccured = False", "value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'],", "site.\"\"\" def __init__(self, hass, si2key, ri4key, siteid, lines, friendly_name, enabled_sensor,", "error occured while \" \"updating RI4 API: %s\", e) errorOccured", "interval self._enabled_sensor = enabled_sensor self._train_type = train_type self._data = {}", "sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type:", "type) newdata[statustype + '_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon']", "self._deviations_table = [] self._direction = direction self._timewindow = timewindow self._nextdeparture_minutes", "is None or sensor_state.state is STATE_ON: try: apidata = self._fpapi.request(self._train_type)", "self._name) # Object used to create our object. newdata =", "@property def name(self): \"\"\"Return the name of the sensor.\"\"\" return", "try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache for %s...\",", "val['refresh_enabled'] = STATE_OFF # Set values of the sensor. 
val['attribution']", "iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros':", "'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID = 'siteid'", "or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status' or \\ sensorconf[CONF_SENSOR_TYPE]", "some nice translations for the statuses etc. statuses = {", "sensor_state.state is STATE_ON: val['refresh_enabled'] = STATE_ON else: val['refresh_enabled'] = STATE_OFF", "['departures', 'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations',", "update completed for %s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board for", "the sensor should return if it is updating or not.", "# Failsafe return '-' @property def device_state_attributes(self): \"\"\" Return the", "not self._departure_table: return '-' expected = self._departure_table[0]['expected'] or '-' if", "'') self._si2datakey = 'si2_' + si2key + '_' + siteid", "self._interval = interval self._enabled_sensor = enabled_sensor self._train_type = train_type self._data", "'Trams', 'Ships']): for (idx, value) in enumerate(departuredata[traffictype]): direction = value['JourneyDirection']", "self._name) def _update_si2(self): errorOccured = False _LOGGER.info(\"Starting to update SI2", "sensor attributes.\"\"\" return {'hasl': self._version, 'pyHasl': self._py_version} @property def state(self):", "timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected = '-' self._lastupdate = '-'", "value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table =", "Return only the relevant portion of the results. 
for response", "sensor_state.state is STATE_ON: try: apidata = self._fpapi.request(self._train_type) except HASL_Error as", "sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to update TL2 for %s...\", self._name)", "'' linenumber = value['LineNumber'] or '' expected = value['ExpectedDateTime'] or", "%s\", e.details) errorOccured = True if not errorOccured: deviations =", "hass.data[DOMAIN] = {} sensors = [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created", "Format the last refresh time. refresh = self._lastupdate if self._lastupdate", "'tl2key' CONF_SITEID = 'siteid' CONF_LINES = 'lines' CONF_DIRECTION = 'direction'", "default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY,", "icon for the frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus'", "'LB', 'SpvC', 'TB1', 'TB2', 'TB3'] # Default values for configuration.", "enabled_sensor self._train_type = train_type self._data = {} self.update = Throttle(interval)(self._update)", "self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is", "'Planned', } # Icon table used for HomeAssistant. statusIcons =", "None: sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state", "retreiving \" \"cached SI2 sensor: %s\", e.details) errorOccured = True", "'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for (i, traffictype)", "} # If the same API have already made the", "= 'tl2key' CONF_SITEID = 'siteid' CONF_LINES = 'lines' CONF_DIRECTION =", "from the API, do some quick and dirty conversions. 
\"\"\"", "for %s...', self._name) except HASL_Error as e: _LOGGER.error(\"A communication error", "self._nextdeparture_minutes = '0' self._nextdeparture_expected = '-' self._lastupdate = '-' self._interval", "_LOGGER.error(\"Sensor %s is missing site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if", "\"\"\" Return number of minutes to the next departure \"\"\"", "jsonFile.close() def _update(self): if self._enabled_sensor is not None: sensor_state =", "return val def parseDepartureTime(self, t): \"\"\" weird time formats from", "# Keys used in the configuration. CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY", "the sensor. val['attribution'] = 'Stockholms Lokaltrafik' val['departures'] = self._departure_table val['deviations']", "interval, enabled_sensor): self._hass = hass self._fpapi = fpapi() self._name =", "(async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util import Throttle from homeassistant.util.dt import", "_LOGGER.info('Updated cache for %s...', self._name) except HASL_Error as e: _LOGGER.error(\"A", "is STATE_ON: try: apidata = self._fpapi.request(self._train_type) except HASL_Error as e:", "for %s...\", self._name) except Exception as e: _LOGGER.error(\"A error occured", "0 or int(direction) \\ == int(self._direction): if self._lines == []", "'-' if expected is not '-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected,", "is STATE_ON: return STATE_ON return STATE_OFF if self._sensorproperty is 'updated':", "'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination, 'time': diff,", "__init__(self, hass, friendly_name, train_type, interval, enabled_sensor): self._hass = hass self._fpapi", "statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] = response['Events'] # Attribution and update", "sensor: %s\", e.details) errorOccured = True if not errorOccured: deviations", "HASL\") for sensorconf in config[CONF_SENSORS]: if 
sensorconf[CONF_SENSOR_TYPE] == 'departures' or", "coding: utf-8 -*- \"\"\"Simple service for SL (Storstockholms Lokaltrafik).\"\"\" import", "self._departure_table: expected_time = self._departure_table[0]['expected'] or '-' expected_minutes = self._departure_table[0]['time'] or", "{ 'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train', 'local':", "class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, friendly_name, train_type,", "\" \"cached RI4 sensor data: %s\", e) errorOccured = True", "\"\"\" try: if t == 'Nu': return 0 s =", "If we dont have external sensor or it is ON", "linenumber = value['LineNumber'] or '' expected = value['ExpectedDateTime'] or ''", "sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass,", "while \" \"updating train location sensor: %s\", e.details) return except", "value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): \"\"\"Get", "of measure. if self._unit_of_measure is not '': val['unit_of_measurement'] = self._unit_of_measure", "unit_table = { 'min': 'min', 'time': '', 'deviations': '', 'refresh':", "'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES =", "int(self._direction): if self._lines == [] or linenumber \\ in self._lines:", "Use some nice translations for the statuses etc. statuses =", "siteid self._ri4key = ri4key self._ri4api = ri4api(ri4key, siteid, 60) self._ri4datakey", "+ ri4key + '_' + siteid self._hass = hass self._name", "it is ON then proceed. 
if self._enabled_sensor is None or", "state of the sensor.\"\"\" return self._train_type def _update(self): if self._enabled_sensor", "self._haslapi = haslapi() self._name = 'HASL Version' self._version = __version__", "_LOGGER.info(\"Created departures sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing", "sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor", "%s\", e) errorOccured = True if not errorOccured: departures =", "sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created", "interval then use that data instead of # requesting it", "return 'mdi:bus' @property def state(self): \"\"\" Return number of minutes", "key, value): try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile)", "'si2_' + si2key + '_' + siteid self._ri4key = ri4key", "= friendly_name self._lines = lines self._siteid = siteid self._enabled_sensor =", "True if not errorOccured: departures = [] iconswitcher = {", "'min': if not self._departure_table: return '-' return self._departure_table[0]['time'] # If", "'TB1', 'TB2', 'TB3'] # Default values for configuration. DEFAULT_INTERVAL =", "None or sensor_state.state is STATE_ON: return STATE_ON return STATE_OFF if", "return the number of deviations. if self._sensorproperty is 'deviations': return", "_LOGGER.warning(\"Failed to parse departure time (%s) \", t) return 0", "{'' + key + '': value} jsonFile = open(self._cachefile, 'w')", "sensor for HASL\") for sensorconf in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] ==", "and dirty conversions. 
\"\"\" try: if t == 'Nu': return", "hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors = [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass))", "+ siteid self._ri4key = ri4key self._ri4api = ri4api(ri4key, siteid, 60)", "ri4key = config.get(CONF_RI4_KEY) if sitekey and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME]", "newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update completed for %s...\", self._name)", "{'type': self._train_type, 'data': json.dumps(self._data)} @property def state(self): \"\"\" Return the", "_LOGGER.error(\"A error occured while \" \"updating TL4 API: %s\", e)", "\\ - self._interval > cacheage or not self._minimization: try: deviationdata", "apidata = self._fpapi.request(self._train_type) except HASL_Error as e: _LOGGER.error(\"A communication error", "\"\"\"Simple service for SL (Storstockholms Lokaltrafik).\"\"\" import datetime import json", "siteid self._hass = hass self._name = friendly_name self._lines = lines", "tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL),", "_update(self): if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) if", "val['next_departure_minutes'] = expected_minutes val['next_departure_time'] = expected_time val['deviation_count'] = len(self._deviations_table) return", "data[key] = value except: data = {'' + key +", "retreiving \" \"cached RI4 sensor data: %s\", e) errorOccured =", "departures sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing site,", "expected is not '-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected", ")) _LOGGER.info(\"Created train sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is", "t == 'Nu': return 0 s = t.split() if len(s)", "open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) 
jsonFile.close() def _update(self): \"\"\"Get the departure board.\"\"\"", "\"updating RI4 API: %s\", e) errorOccured = True else: try:", "as e: _LOGGER.error(\"A error occured while \" \"updating TL4 API:", "s = t.split(':') if len(s) > 1: rightnow = now(self._hass.config.time_zone)", "now(self._hass.config.time_zone) \\ - self._interval > cacheage or not self._minimization: try:", "\" \"updating TL4 API: %s\", e) return else: apidata =", "portion of the results. for response in apidata: type =", "cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string,", "CONF_LINES = 'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW", "'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining", "for the frontend.\"\"\" return None @property def device_state_attributes(self): \"\"\" Return", "attributes.\"\"\" return {'type': self._train_type, 'data': json.dumps(self._data)} @property def state(self): \"\"\"", "sensor.\"\"\" return self._train_type def _update(self): if self._enabled_sensor is not None:", "\\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return expected # If", "def state(self): \"\"\" Return the state of the sensor.\"\"\" return", "self._lastupdate def getCache(self, key): try: jsonFile = open(self._cachefile, 'r') data", "CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY", "from hasl import (haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error,", "\" \"cached SI2 sensor: %s\", e.details) errorOccured = True if", "except HASL_Error as e: _LOGGER.error(\"A communication error occured while \"", "'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 
'mdi:train',", "= value except: data = {'' + key + '':", "SL site.\"\"\" def __init__(self, hass, si2key, ri4key, siteid, lines, friendly_name,", "* 60 + int(s[1]) - (rightnow.hour * 60 + rightnow.minute)", "e: _LOGGER.error(\"A communication error occured while \" \"updating TL2 sensor:", "self._type = type self._sensordata = [] self._lastupdate = '-' self._cachefile", "sensor. val['attribution'] = 'Stockholms Lokaltrafik' val['departures'] = self._departure_table val['deviations'] =", "None: sensor_state = self._hass.states.get(self._enabled_sensor) # If we dont have external", "e: _LOGGER.error(\"A communication error occured while \" \"updating RI4 API:", "if self._sensorproperty is 'updated': if self._lastupdate is '-': return '-'", "self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) # Return", "= True if not errorOccured: departures = [] iconswitcher =", "'', 'deviations': '', 'refresh': '', 'update': '', } if si2key:", "= 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation',", "error occured while retreiving \" \"cached RI4 sensor data: %s\",", "self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization", "from homeassistant.util import Throttle from homeassistant.util.dt import now from hasl", "'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram',", "_LOGGER.info(\"Created train sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing", "e: _LOGGER.error(\"A error occured while\" \"updating train location sensor: %s\",", "is not '-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected =", "= haslapi() self._name = 'HASL Version' self._version = __version__ self._py_version", "self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') 
self._sensordata = newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2", "self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) except Exception", "or not self._minimization: try: apidata = self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes']", "or it is ON then proceed. if self._enabled_sensor is None", "{} self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name", "spare some innocent credits from dying. cacheage = self._hass.data[DOMAIN][self._datakey] if", "cacheage or now(self._hass.config.time_zone) \\ - self._interval > cacheage or not", "'-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the unit of", "for %s...\", self._name) except HASL_Error as e: _LOGGER.error(\"A communication error", "= ('ferry' if type == 'fer' else type) newdata[statustype +", "ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION)", "\"updating train location sensor: %s\", e.details) return except Exception as", "tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor", "is not None: sensor_state = self._hass.states.get(self._enabled_sensor) # If we dont", "self._minimization: try: departuredata = self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata)", "the state of the sensor.\"\"\" return self._version + \"/\" +", "= 'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID = 'siteid' CONF_LINES =", "departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) 
_LOGGER.info(\"Updated cache for", "{ 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains':", "@property def device_state_attributes(self): \"\"\" Return the sensor attributes .\"\"\" #", "minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update = Throttle(interval)(self._update)", "Exception as e: _LOGGER.error(\"A error occured while retreiving \" \"cached", "linenumber \\ in self._lines: diff = self.parseDepartureTime(displaytime) if diff <", "location sensor: %s\", e.details) return except Exception as e: _LOGGER.error(\"A", "'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' } # If the", "= response['Type'] if self._type is None or type in self._type:", "self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) except Exception", "> cacheage or not self._minimization: try: departuredata = self._ri4api.request() departuredata", "def __init__(self, hass, tl2key, friendly_name, enabled_sensor, interval, type, minimization): self._tl2api", "try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache for %s...\",", "for configuration. DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION =", "# If the sensor should return if it is updating", "= self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\", "CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS", "'Nu': return 0 s = t.split() if len(s) > 1", "if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey", "STATE_OFF # Set values of the sensor. 
val['attribution'] = 'Stockholms", "== 'Nu': return 0 s = t.split() if len(s) >", "default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def", "= self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state is STATE_ON:", "for sensorconf in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\", "\\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] = response['Events'] # Attribution and", "self._lastupdate is not '-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup", "sensor. self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name", "self._lines = lines self._siteid = siteid self._enabled_sensor = enabled_sensor self._sensorproperty", "[] self._lastupdate = '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization", "= hass self._haslapi = haslapi() self._name = 'HASL Version' self._version", "diff < self._timewindow: departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime,", "= True else: try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from", "state(self): \"\"\" Return the state of the sensor.\"\"\" return self._train_type", "departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination, 'time':", "'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE", "for HASL\") for sensorconf in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] == 'departures'", "departure. 
if self._sensorproperty is 'min': if not self._departure_table: return '-'", "'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for (i, traffictype) in enumerate(['Metros',", "self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone)", "= ['departures', 'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time',", "# If the sensor should return the time at which", "'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table = \\", "statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned':", "'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY = 'property'", "class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def __init__(self, hass): self._hass =", "errorOccured = True except Exception as e: _LOGGER.error(\"A error occured", "'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): \"\"\"Get the departure board.\"\"\" #", "value['LineNumber'] or '' expected = value['ExpectedDateTime'] or '' groupofline =", "CONF_TL2_KEY = 'tl2key' CONF_SITEID = 'siteid' CONF_LINES = 'lines' CONF_DIRECTION", "(idx, value) in enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime", "val['refresh_enabled'] = STATE_ON else: val['refresh_enabled'] = STATE_OFF # Set values", "sensor, get its value. 
if self._enabled_sensor is not None: sensor_state", "vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]):", "'-' expected = self._departure_table[0]['expected'] or '-' if expected is not", "siteid, 60) self._ri4datakey = 'ri2_' + ri4key + '_' +", "the sensor attributes.\"\"\" return self._sensordata @property def state(self): \"\"\" Return", "'updated': if self._lastupdate is '-': return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S')", "of the sensor.\"\"\" return self._train_type def _update(self): if self._enabled_sensor is", "Setup the unit of measure. if self._unit_of_measure is not '':", "self._name @property def icon(self): \"\"\" Return the icon for the", "for %s...\", self._name) def _update_si2(self): errorOccured = False _LOGGER.info(\"Starting to", "si2key, ri4key, siteid, lines, friendly_name, enabled_sensor, interval, direction, timewindow, sensorproperty,", "add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, friendly_name,", "'_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] = \\ statusIcons.get(response['StatusIcon'])", "'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._ri4datakey):", "'EventMinor': 'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon table", "'-' return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return '-' @property def", "minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key: if", "do some quick and dirty conversions. 
\"\"\" try: if t", "PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON)", "jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if self._enabled_sensor is not None: sensor_state", "refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time'] = expected_time val['deviation_count'] = len(self._deviations_table)", "sensor: %s\", e.details) return except Exception as e: _LOGGER.error(\"A error", "= '.storage/haslcache.json' # Defining the configuration schema. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({", "'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' } # If the same API", "{} # Format the next exptected time. if self._departure_table: expected_time", "'time': if not self._departure_table: return '-' expected = self._departure_table[0]['expected'] or", "\"\"\" Return the sensor attributes.\"\"\" return self._sensordata @property def state(self):", "import PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF,", "if self._enabled_sensor is None or sensor_state.state \\ is STATE_ON: self._update_ri4()", "= { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant',", "si2key + '_' + siteid self._ri4key = ri4key self._ri4api =", "groupofline = value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') if", "value['UpToDateTime'], 'details': value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table = \\ sorted(deviations,", "= enabled_sensor self._type = type self._sensordata = [] self._lastupdate =", "sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname,", "= hass self._fpapi = fpapi() self._name = friendly_name self._interval =", "'.storage/haslcache.json' # Defining the configuration 
schema. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ #", "of resulttypes and the corresponding units of measure. unit_table =", "Sensor.\"\"\" def __init__(self, hass, tl2key, friendly_name, enabled_sensor, interval, type, minimization):", "\"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, friendly_name, train_type, interval, enabled_sensor):", "not. if self._sensorproperty is 'refresh': if self._enabled_sensor is None or", "val['next_departure_time'] = expected_time val['deviation_count'] = len(self._deviations_table) return val def parseDepartureTime(self,", "should return minutes to next departure. if self._sensorproperty is 'min':", "= {} self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the", "self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name) except", "friendly_name self._lines = lines self._siteid = siteid self._enabled_sensor = enabled_sensor", "'-' expected_minutes = self._departure_table[0]['time'] or '-' if expected_time is not", "return min except Exception: _LOGGER.warning(\"Failed to parse departure time (%s)", "the specified interval then use that data instead of #", "or '-' if expected is not '-': expected = \\", "= __version__ self._py_version = self._haslapi.version() @property def name(self): \"\"\"Return the", "\"updating SI2 sensor: %s\", e.details) errorOccured = True except Exception", "= True else: try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from", "'_' + siteid self._ri4key = ri4key self._ri4api = ri4api(ri4key, siteid,", "e) return else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache", "error occured while retreiving \" \"cached SI2 sensor: %s\", e.details)", "configuration schema. 
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY): cv.string,", "= ['min', 'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT', 'RB',", "'' if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = '' #", "cacheage = self._hass.data[DOMAIN][self._si2datakey] if not cacheage or now(self._hass.config.time_zone) \\ -", "apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name)", "self._si2key = si2key self._si2api = si2api(si2key, siteid, '') self._si2datakey =", "number of deviations. if self._sensorproperty is 'deviations': return len(self._deviations_table) #", "CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event", "= self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\", "{'hasl': self._version, 'pyHasl': self._py_version} @property def state(self): \"\"\" Return the", "hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = ''", "= expected.strftime('%H:%M:%S') return expected # If the sensor should return", "'' # Setup updating of the sensor. self.update = Throttle(interval)(self._update)", "sensor data. 
newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d'", "try: if t == 'Nu': return 0 s = t.split()", "cache for %s...\", self._name) # Return only the relevant portion", "is None or sensor_state.state is STATE_ON: val['refresh_enabled'] = STATE_ON else:", "= {'' + key + '': value} jsonFile = open(self._cachefile,", "or sensor_state.state is STATE_ON: val['refresh_enabled'] = STATE_ON else: val['refresh_enabled'] =", "_update_si2(self): errorOccured = False _LOGGER.info(\"Starting to update SI2 for %s...\",", "= '-' self._lastupdate = '-' self._interval = interval self._unit_of_measure =", "%s\", e.details) errorOccured = True except Exception as e: _LOGGER.error(\"A", "is STATE_ON: _LOGGER.info(\"Starting to update TL2 for %s...\", self._name) #", "return except Exception as e: _LOGGER.error(\"A error occured while \"", "min < 0: min = min + 1440 return min", "values for configuration. DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION", "value['Destination'] or '' linenumber = value['LineNumber'] or '' expected =", "'updated'] LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1',", "'destination': destination, 'time': diff, 'expected': expected, 'type': traffictype, 'groupofline': groupofline,", "self._data = {} self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return", "return self._lastupdate def getCache(self, key): try: jsonFile = open(self._cachefile, 'r')", "'mdi:tram', 'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' } # If", "value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if", "'refresh': if self._enabled_sensor is None or sensor_state.state is STATE_ON: return", "not self._minimization: try: deviationdata = self._si2api.request() deviationdata = deviationdata['ResponseData'] 
self.putCache(self._si2datakey,", "sensor is currently updating or not. if self._enabled_sensor is not", "'TB3'] # Default values for configuration. DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW", "sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4 update completed for %s...\", self._name)", "'time': diff, 'expected': expected, 'type': traffictype, 'groupofline': groupofline, 'icon': icon,", "_LOGGER.info(\"RI4 update completed for %s...\", self._name) def _update_si2(self): errorOccured =", "not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key: if not hass.data[DOMAIN].get(self._si2datakey):", "'Trains', 'Trams', 'Ships']): for (idx, value) in enumerate(departuredata[traffictype]): direction =", "Default values for configuration. DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30", "'EventPlanned': 'Planned', } # Icon table used for HomeAssistant. statusIcons", "_LOGGER.error(\"A communication error occured while \" \"updating TL2 sensor: %s\",", "self._name) # Return only the relevant portion of the results.", "HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN", "hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW),", "apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name)", "sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type,", "'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh',", "self._enabled_sensor = enabled_sensor self._type = type self._sensordata = [] self._lastupdate", "None or sensor_state.state \\ is 
STATE_ON: self._update_ri4() if self._si2key: self._update_si2()", "occured while retreiving \" \"cached SI2 sensor: %s\", e.details) errorOccured", "or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey = sensorconf.get(CONF_SITEID) si2key =", "homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util import Throttle from", "is None or type in self._type: statustype = ('ferry' if", "\" \"updating SI2 sensor: %s\", e.details) errorOccured = True except", "= 'si2_' + si2key + '_' + siteid self._ri4key =", "data: %s\", e) errorOccured = True if not errorOccured: departures", "return self._sensordata @property def state(self): \"\"\" Return the state of", "sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status' or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2':", "\"\"\"HASL Version Sensor.\"\"\" def __init__(self, hass): self._hass = hass self._haslapi", "except Exception as e: _LOGGER.error(\"A error occured while retreiving \"", "communication error occured while \" \"updating train location sensor: %s\",", "cache for %s...\", self._name) except Exception as e: _LOGGER.error(\"A error", "'Trains': 'mdi:train', } for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains',", "_LOGGER.error(\"A error occured while\" \"updating train location sensor: %s\", e)", "train location sensor: %s\", e) return self._data = apidata _LOGGER.info(\"Update", "# Check if sensor is currently updating or not. 
if", "formats from the API, do some quick and dirty conversions.", "self._py_version = self._haslapi.version() @property def name(self): \"\"\"Return the name of", "= 'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY =", "'', 'update': '', } if si2key: self._si2key = si2key self._si2api", "cacheage or not self._minimization: try: apidata = self._tl2api.request() apidata =", "cacheage or not self._minimization: try: deviationdata = self._si2api.request() deviationdata =", "expected_time = '-' expected_minutes = '-' # Format the last", "sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key = config.get(CONF_TL2_KEY) if tl2key: sensorname =", "board.\"\"\" # If using external sensor, get its value. if", "Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata = newdata", "= fpapi() self._name = friendly_name self._interval = interval self._enabled_sensor =", "error occured while \" \"updating SI2 sensor: %s\", e) errorOccured", "not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update = Throttle(interval)(self._update) @property def", "schema. 
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY):", "+ '_' + siteid self._hass = hass self._name = friendly_name", "Return the sensor attributes.\"\"\" return self._sensordata @property def state(self): \"\"\"", "(%s) \", t) return 0 def getCache(self, key): try: jsonFile", "in apidata: type = response['Type'] if self._type is None or", "ri4api(ri4key, siteid, 60) self._ri4datakey = 'ri2_' + ri4key + '_'", "= t.split() if len(s) > 1 and s[1] == 'min':", "errorOccured = True if not errorOccured: deviations = [] for", "sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) ))", "self._deviations_table = \\ sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2 update completed", "= 'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW =", "_LOGGER.error(\"A communication error occured while \" \"updating SI2 sensor: %s\",", "enabled_sensor): self._hass = hass self._fpapi = fpapi() self._name = friendly_name", "newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S')", "val def parseDepartureTime(self, t): \"\"\" weird time formats from the", "\\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...', self._name) except HASL_Error as", "updating or not. if self._enabled_sensor is not None: sensor_state =", "self._name = friendly_name self._enabled_sensor = enabled_sensor self._type = type self._sensordata", "quick and dirty conversions. 
\"\"\" try: if t == 'Nu':", "self._enabled_sensor = enabled_sensor self._train_type = train_type self._data = {} self.update", "data from cache for %s...\", self._name) # Return only the", "= self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) except", "missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\"", "s[1] == 'min': return int(s[0]) s = t.split(':') if len(s)", "= self._hass.data[DOMAIN][self._si2datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval", "setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\" if not hass.data.get(DOMAIN):", "# Format the last refresh time. refresh = self._lastupdate if", "SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def __init__(self, hass): self._hass = hass", "icon(self): \"\"\" Return the icon for the frontend.\"\"\" return None", "used in the configuration. CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY = 'si2key'", "self._train_type def _update(self): if self._enabled_sensor is not None: sensor_state =", "len(self._deviations_table) return val def parseDepartureTime(self, t): \"\"\" weird time formats", "= friendly_name self._enabled_sensor = enabled_sensor self._type = type self._sensordata =", "if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] = '' if self._si2key: if not", "then proceed. 
if self._enabled_sensor is None or sensor_state.state \\ is", "'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION = 'version_sensor'", "except: return {} def putCache(self, key, value): try: jsonFile =", "data instead of # requesting it again and spare some", "'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value) in enumerate(departuredata[traffictype]): direction", "now(self._hass.config.time_zone) def _update_ri4(self): errorOccured = False _LOGGER.info(\"Starting to update RI4", "self._direction = direction self._timewindow = timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected", "import Entity from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util", "= min + 1440 return min except Exception: _LOGGER.warning(\"Failed to", "self._interval > cacheage or not self._minimization: try: departuredata = self._ri4api.request()", "'train', 'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE =", "using external sensor, get its value. if self._enabled_sensor is not", "deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...', self._name)", "time. 
refresh = self._lastupdate if self._lastupdate is not '-': refresh", "Return the state of the sensor.\"\"\" return self._lastupdate def getCache(self,", "vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION):", "DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE", "= sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname,", "None or sensor_state.state is STATE_ON: val['refresh_enabled'] = STATE_ON else: val['refresh_enabled']", "\"\"\" Return the icon for the frontend.\"\"\" return None @property", "'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons =", "frontend.\"\"\" return 'mdi:train-car' @property def device_state_attributes(self): \"\"\" Return the sensor", "sensor: %s\", e.details) errorOccured = True except Exception as e:", "expected = self._departure_table[0]['expected'] or '-' if expected is not '-':", "def __init__(self, hass): self._hass = hass self._haslapi = haslapi() self._name", "the sensor attributes.\"\"\" return {'hasl': self._version, 'pyHasl': self._py_version} @property def", "errorOccured = True except Exception as e: _LOGGER.error(\"A communication error", "%s...\", self._name) except Exception as e: _LOGGER.error(\"A error occured while", "STATE_ON: _LOGGER.info(\"Starting to update TL2 for %s...\", self._name) # Object", "is 'time': if not self._departure_table: return '-' expected = self._departure_table[0]['expected']", "= 30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE =", "expected_minutes = self._departure_table[0]['time'] or '-' if expected_time is not '-':", "traffictype, 'groupofline': groupofline, 'icon': icon, }) 
self._departure_table = sorted(departures, key=lambda", "create our object. newdata = {} # Use some nice", "from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util import Throttle", "Exception as e: _LOGGER.error(\"A error occured while\" \"updating train location", "return {'type': self._train_type, 'data': json.dumps(self._data)} @property def state(self): \"\"\" Return", "})]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup the", "t.split(':') if len(s) > 1: rightnow = now(self._hass.config.time_zone) min =", "k['time']) _LOGGER.info(\"RI4 update completed for %s...\", self._name) def _update_si2(self): errorOccured", "self._hass = hass self._name = friendly_name self._enabled_sensor = enabled_sensor self._type", "TL2 sensor: %s\", e.details) return except Exception as e: _LOGGER.error(\"A", "# Setup updating of the sensor. self.update = Throttle(interval)(self._update) @property", "ri4key self._ri4api = ri4api(ri4key, siteid, 60) self._ri4datakey = 'ri2_' +", "vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW,", "vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean,", "train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def", "icon = iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) == 0 or int(direction)", "import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity import", "return {} def putCache(self, key, value): try: 
jsonFile = open(self._cachefile,", "updating of the sensor. self.update = Throttle(interval)(self._update) @property def name(self):", "should return the time at which next departure occurs. if", "statuses.get(event['StatusIcon']) event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] = response['Events']", "cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES),", "for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if not cacheage or", "occured while \" \"updating RI4 API: %s\", e) errorOccured =", "import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const", "'SpvC', 'TB1', 'TB2', 'TB3'] # Default values for configuration. DEFAULT_INTERVAL", "return '-' @property def device_state_attributes(self): \"\"\" Return the sensor attributes", "t.split() if len(s) > 1 and s[1] == 'min': return", "train location sensor: %s\", e.details) return except Exception as e:", "'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated']", "'mdi:train-variant', 'metro': 'mdi:subway-variant' } # If the same API have", "['min', 'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB',", "'comb', 'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES", "minimization): \"\"\"Initialize\"\"\" # The table of resulttypes and the corresponding", "departures = [] iconswitcher = { 'Buses': 'mdi:bus', 'Trams': 'mdi:tram',", "if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor", "self._sensorproperty is 'updated': if self._lastupdate is '-': return '-' return", "if self._enabled_sensor is not None: sensor_state = 
self._hass.states.get(self._enabled_sensor) # If", "sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname =", "while \" \"updating RI4 API: %s\", e) errorOccured = True", "iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) == 0 or int(direction) \\ ==", "def icon(self): \"\"\" Return the icon for the frontend.\"\"\" if", "t) return 0 def getCache(self, key): try: jsonFile = open(self._cachefile,", "= { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline'", "+ si2key + '_' + siteid self._ri4key = ri4key self._ri4api", "> 1: rightnow = now(self._hass.config.time_zone) min = int(s[0]) * 60", "= '' # Setup updating of the sensor. self.update =", "\"\"\" # If the sensor should return minutes to next", "sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS),", "if min < 0: min = min + 1440 return", "errorOccured = True else: try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data", "train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass,", "of the sensor.\"\"\" return self._name @property def icon(self): \"\"\" Return", "\\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name) except HASL_Error as", "Return number of minutes to the next departure \"\"\" #", "value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'], 'sortOrder':", "or now(self._hass.config.time_zone) \\ - self._interval > cacheage or not self._minimization:", "s = t.split() if len(s) > 1 and s[1] ==", "self._si2key: if not hass.data[DOMAIN].get(self._si2datakey): 
hass.data[DOMAIN][self._si2datakey] = '' # Setup updating", "STATE_ON: try: apidata = self._fpapi.request(self._train_type) except HASL_Error as e: _LOGGER.error(\"A", "cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or now(self._hass.config.time_zone) \\ -", "- self._interval > cacheage or not self._minimization: try: departuredata =", "in enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime = value['DisplayTime']", "'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB',", "}, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\"", "if diff < self._timewindow: departures.append({ 'line': linenumber, 'direction': direction, 'departure':", "error occured while \" \"updating TL4 API: %s\", e) return", "'_icon'] = \\ trafficTypeIcons.get(statustype) for event in response['Events']: event['Status'] =", "if self._sensorproperty is 'min': if not self._departure_table: return '-' return", "else: val['refresh_enabled'] = STATE_OFF # Set values of the sensor.", "'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for (i,", "time formats from the API, do some quick and dirty", "'_events'] = response['Events'] # Attribution and update sensor data. newdata['attribution']", "= lines self._siteid = siteid self._enabled_sensor = enabled_sensor self._sensorproperty =", "('ferry' if type == 'fer' else type) newdata[statustype + '_status']", "True except Exception as e: _LOGGER.error(\"A communication error occured while", "friendly_name, train_type, interval, enabled_sensor): self._hass = hass self._fpapi = fpapi()", "si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER = logging.getLogger(__name__)", "the name of the sensor.\"\"\" return self._name @property def icon(self):", "for the statuses etc. 
statuses = { 'EventGood': 'Good', 'EventMinor':", "'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' }", "sensor_state.state is STATE_ON: return STATE_ON return STATE_OFF if self._sensorproperty is", "for the frontend.\"\"\" return 'mdi:train-car' @property def device_state_attributes(self): \"\"\" Return", "if self._unit_of_measure is not '': val['unit_of_measurement'] = self._unit_of_measure # Check", "== 'tl2': tl2key = config.get(CONF_TL2_KEY) if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME]", "vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL,", "\\ trafficTypeIcons.get(statustype) for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon']", "the same API have already made the request in within", "self._enabled_sensor is None or sensor_state.state is STATE_ON: try: apidata =", "'Ships']): for (idx, value) in enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or", "= tl2api(tl2key) self._datakey = 'tl2_' + tl2key self._interval = interval", "'mdi:train-car') if int(self._direction) == 0 or int(direction) \\ == int(self._direction):", "@property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return {'type':", "'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class'", "value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or '' destination =", "'mdi:train-car' @property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return", "time at which next departure occurs. 
if self._sensorproperty is 'time':", "= now(self._hass.config.time_zone) def _update_ri4(self): errorOccured = False _LOGGER.info(\"Starting to update", "CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE", "sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for HASL\") for sensorconf in config[CONF_SENSORS]:", "should return the number of deviations. if self._sensorproperty is 'deviations':", "self._ri4datakey = 'ri2_' + ri4key + '_' + siteid self._hass", "= len(self._deviations_table) return val def parseDepartureTime(self, t): \"\"\" weird time", "values of the sensor. val['attribution'] = 'Stockholms Lokaltrafik' val['departures'] =", "self._version + \"/\" + self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\"", "departure \"\"\" # If the sensor should return minutes to", "self._version = __version__ self._py_version = self._haslapi.version() @property def name(self): \"\"\"Return", "self._departure_table = [] self._deviations_table = [] self._direction = direction self._timewindow", "sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey = sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key", "= self._haslapi.version() @property def name(self): \"\"\"Return the name of the", "to update TL2 for %s...\", self._name) # Object used to", "HASL_Error as e: _LOGGER.error(\"A communication error occured while \" \"updating", "if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR),", "for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \\", "self._haslapi.version() @property def name(self): \"\"\"Return the name of the sensor.\"\"\"", "response in apidata: type = response['Type'] if self._type is None", "'tl2': tl2key = config.get(CONF_TL2_KEY) if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] 
sensors.append(SLStatusSensor(", "vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), },", "API: %s\", e) errorOccured = True else: try: departuredata =", "= hass self._name = friendly_name self._enabled_sensor = enabled_sensor self._type =", "config.get(CONF_TL2_KEY) if tl2key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname,", "vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES,", "error occured while \" \"updating TL2 sensor: %s\", e.details) return", "self._interval = interval self._hass = hass self._name = friendly_name self._enabled_sensor", "= '' self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the", "60 + int(s[1]) - (rightnow.hour * 60 + rightnow.minute) if", "state of the sensor.\"\"\" return self._lastupdate def getCache(self, key): try:", "sensor attributes.\"\"\" return self._sensordata @property def state(self): \"\"\" Return the", "= \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata = newdata self._lastupdate =", "'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant'", "Sensor.\"\"\" def __init__(self, hass): self._hass = hass self._haslapi = haslapi()", "'r') data = json.load(jsonFile) jsonFile.close() data[key] = value except: data", "and spare some innocent credits from dying. cacheage = self._hass.data[DOMAIN][self._datakey]", "update sensor data. 
newdata['attribution'] = \"Stockholms Lokaltrafik\" newdata['last_updated'] = \\", "self._interval = interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)", "the sensor.\"\"\" return self._train_type def _update(self): if self._enabled_sensor is not", "(idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate':", "sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s", "e: _LOGGER.error(\"A error occured while retreiving \" \"cached RI4 sensor", "sensorproperty, minimization): \"\"\"Initialize\"\"\" # The table of resulttypes and the", "si2key = config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if sitekey and ri4key:", "%s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing site, si2key or", "If the sensor should return the number of deviations. if", "return {'hasl': self._version, 'pyHasl': self._py_version} @property def state(self): \"\"\" Return", "expected # If the sensor should return the number of", "%s\", e) return self._data = apidata _LOGGER.info(\"Update completed %s...\", self._name)", "sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) ))", "self._enabled_sensor is None or sensor_state.state \\ is STATE_ON: self._update_ri4() if", "default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]),", "refresh = self._lastupdate if self._lastupdate is not '-': refresh =", "device_state_attributes(self): \"\"\" Return the sensor 
attributes.\"\"\" return {'type': self._train_type, 'data':", "> cacheage or not self._minimization: try: apidata = self._tl2api.request() apidata", "STATE_ON: self._update_ri4() if self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self):", "sensor: %s\", e) errorOccured = True else: try: deviationdata =", "len(s) > 1 and s[1] == 'min': return int(s[0]) s", "Object used to create our object. newdata = {} #", "if t == 'Nu': return 0 s = t.split() if", "If the sensor should return the time at which next", "statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned':", "expected_time val['deviation_count'] = len(self._deviations_table) return val def parseDepartureTime(self, t): \"\"\"", "default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string,", "val['attribution'] = 'Stockholms Lokaltrafik' val['departures'] = self._departure_table val['deviations'] = self._deviations_table", "= 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION =", "self._type is None or type in self._type: statustype = ('ferry'", "etc. statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor', 'EventMajor': 'Closed',", "class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, tl2key, friendly_name,", "'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB',", "= '-' # Format the last refresh time. 
refresh =", "= int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60", "The table of resulttypes and the corresponding units of measure.", "of the sensor.\"\"\" return self._lastupdate def getCache(self, key): try: jsonFile", "_LOGGER.info(\"Update completed %s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def", "vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0,", "= ri4api(ri4key, siteid, 60) self._ri4datakey = 'ri2_' + ri4key +", "= apidata _LOGGER.info(\"Update completed %s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL Version", "json.dumps(self._data)} @property def state(self): \"\"\" Return the state of the", "departuredata = self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] =", "self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...', self._name) except", "\"updating TL4 API: %s\", e) return else: apidata = self.getCache(self._datakey)", "-*- coding: utf-8 -*- \"\"\"Simple service for SL (Storstockholms Lokaltrafik).\"\"\"", "json.load(jsonFile) jsonFile.close() data[key] = value except: data = {'' +", "\"updating TL2 sensor: %s\", e.details) return except Exception as e:", "_LOGGER.info(\"Reusing data from cache for %s...\", self._name) # Return only", "next departure. 
if self._sensorproperty is 'min': if not self._departure_table: return", "'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE) if train_type: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor(", "tl2key self._interval = interval self._hass = hass self._name = friendly_name", "= '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not", "ri4key, siteid, lines, friendly_name, enabled_sensor, interval, direction, timewindow, sensorproperty, minimization):", "val['unit_of_measurement'] = self._unit_of_measure # Check if sensor is currently updating", "sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures", "e: _LOGGER.error(\"A communication error occured while \" \"updating train location", "val['deviations'] = self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time']", "or linenumber \\ in self._lines: diff = self.parseDepartureTime(displaytime) if diff", "[vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):", "'mdi:train', } for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams',", "vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({", "\"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, tl2key, friendly_name, enabled_sensor, interval,", "sitekey and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, 
ri4key,", "while\" \"updating train location sensor: %s\", e) return self._data =", "not '-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S')", "60 + rightnow.minute) if min < 0: min = min", "in self._lines: diff = self.parseDepartureTime(displaytime) if diff < self._timewindow: departures.append({", "= PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY):", "CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity import Entity from", "displaytime, 'destination': destination, 'time': diff, 'expected': expected, 'type': traffictype, 'groupofline':", "if sensorconf[CONF_SENSOR_TYPE] == 'status' or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key", "or sensor_state.state is STATE_ON: try: apidata = self._fpapi.request(self._train_type) except HASL_Error", "cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list,", "response['Type'] if self._type is None or type in self._type: statustype", "deviations. 
if self._sensorproperty is 'deviations': return len(self._deviations_table) # If the", "deviations = [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated':", "or type in self._type: statustype = ('ferry' if type ==", "= \\ trafficTypeIcons.get(statustype) for event in response['Events']: event['Status'] = statuses.get(event['StatusIcon'])", "the sensor.\"\"\" return self._name @property def icon(self): \"\"\" Return the", "= enabled_sensor self._sensorproperty = sensorproperty self._departure_table = [] self._deviations_table =", "self._timewindow: departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination,", "'' self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name", "cv import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from", "it again and spare some innocent credits from dying. cacheage", "Version Sensor.\"\"\" def __init__(self, hass): self._hass = hass self._haslapi =", "self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update completed for %s...\", self._name) class", "expected_minutes = '-' # Format the last refresh time. refresh", "if len(s) > 1 and s[1] == 'min': return int(s[0])", "icon(self): \"\"\" Return the icon for the frontend.\"\"\" if self._deviations_table:", "minimization): self._tl2api = tl2api(tl2key) self._datakey = 'tl2_' + tl2key self._interval", "_LOGGER.info(\"Created status sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing", "\\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key = config.get(CONF_TL2_KEY) if tl2key: sensorname", "direction, 'departure': displaytime, 'destination': destination, 'time': diff, 'expected': expected, 'type':", "_LOGGER.info(\"Updated cache for %s...\", self._name) except HASL_Error as e: _LOGGER.error(\"A", "val = {} # Format the next exptected time. 
if", "sensor should return the time at which next departure occurs.", "'tram': 'mdi:tram', 'train': 'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' } #", "timedelta import homeassistant.helpers.config_validation as cv import voluptuous as vol from", "= open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() data[key] = value", "\\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] = \\ trafficTypeIcons.get(statustype) for event", "statustype = ('ferry' if type == 'fer' else type) newdata[statustype", "'mdi:train', 'local': 'mdi:train-variant', 'metro': 'mdi:subway-variant' } # If the same", "= 'tl2_' + tl2key self._interval = interval self._hass = hass", "next departure \"\"\" # If the sensor should return minutes", "is currently updating or not. if self._enabled_sensor is not None:", "= si2api(si2key, siteid, '') self._si2datakey = 'si2_' + si2key +", "class SLDeparturesSensor(Entity): \"\"\"Departure board for one SL site.\"\"\" def __init__(self,", "is missing site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] ==", "= STATE_OFF # Set values of the sensor. 
val['attribution'] =", "\"cached RI4 sensor data: %s\", e) errorOccured = True if", "newdata[statustype + '_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] =", "+ key + '': value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data))", "data = {'' + key + '': value} jsonFile =", "'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR = 'sensor' CONF_TIMEWINDOW = 'timewindow'", "(ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity import Entity", "True if not errorOccured: deviations = [] for (idx, value)", "CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES", "self._interval > cacheage or not self._minimization: try: apidata = self._tl2api.request()", "newdata[statustype + '_icon'] = \\ trafficTypeIcons.get(statustype) for event in response['Events']:", "self._ri4api = ri4api(ri4key, siteid, 60) self._ri4datakey = 'ri2_' + ri4key", "0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS = ['metro',", "refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the unit of measure. if self._unit_of_measure", "proceed. 
if self._enabled_sensor is None or sensor_state.state \\ is STATE_ON:", "status sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing tl2key", "Situation Sensor.\"\"\" def __init__(self, hass, tl2key, friendly_name, enabled_sensor, interval, type,", "for response in apidata: type = response['Type'] if self._type is", "= [] for (idx, value) in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'],", "# If the same API have already made the request", "direction = value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or ''", "hass self._haslapi = haslapi() self._name = 'HASL Version' self._version =", "that data instead of # requesting it again and spare", "%s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME])", "as e: _LOGGER.error(\"A communication error occured while \" \"updating train", "the corresponding units of measure. unit_table = { 'min': 'min',", "statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] = \\ trafficTypeIcons.get(statustype) for event in", "- self._interval > cacheage or not self._minimization: try: apidata =", "sensor should return if it is updating or not. if", "of the sensor. self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return", "cache for %s...', self._name) except HASL_Error as e: _LOGGER.error(\"A communication", "update RI4 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not", "= ['metro', 'train', 'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures'", "(Storstockholms Lokaltrafik).\"\"\" import datetime import json import logging from datetime", "= 'ri2_' + ri4key + '_' + siteid self._hass =", "is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation", "not. 
if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) if", "self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting", "linenumber, 'direction': direction, 'departure': displaytime, 'destination': destination, 'time': diff, 'expected':", "else: _LOGGER.error(\"Sensor %s is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE]", "def putCache(self, key, value): try: jsonFile = open(self._cachefile, 'r') data", "DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining the configuration", "or int(direction) \\ == int(self._direction): if self._lines == [] or", "= config.get(CONF_RI4_KEY) if sitekey and ri4key: sensorname = sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor(", "\"Stockholms Lokaltrafik\" newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata =", "def _update_ri4(self): errorOccured = False _LOGGER.info(\"Starting to update RI4 for", "value. 
if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) #", "update TL2 for %s...\", self._name) # Object used to create", "= self._hass.data[DOMAIN][self._datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval", "else: try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache for", "return 0 s = t.split() if len(s) > 1 and", "while \" \"updating SI2 sensor: %s\", e.details) errorOccured = True", "= 'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2'] LIST_SENSOR_PROPERTIES", "Lokaltrafik' val['departures'] = self._departure_table val['deviations'] = self._deviations_table val['last_refresh'] = refresh", "if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for HASL\") for sensorconf", "HomeAssistant. statusIcons = { 'EventGood': 'mdi:check', 'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close',", "'Minor', 'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon table used", "sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if sitekey and", "from cache for %s...\", self._name) # Return only the relevant", "traffictype) in enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value)", "= 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining the configuration schema.", "_LOGGER.info(\"TL2 update completed for %s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board", "+ '%H:%M:%S') self._sensordata = newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update", "0 s = t.split() if len(s) > 1 and s[1]", "['metro', 'train', 'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE", "'-' if expected_time is not '-': expected_time = \\ datetime.datetime.strptime(expected_time,", "of measure. 
unit_table = { 'min': 'min', 'time': '', 'deviations':", "sensor should return the number of deviations. if self._sensorproperty is", "else: _LOGGER.error(\"Sensor %s is missing site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME])", "value['DisplayTime'] or '' destination = value['Destination'] or '' linenumber =", "= 'property' CONF_TRAIN_TYPE = 'train_type' CONF_TRAFFIC_CLASS = 'traffic_class' CONF_VERSION =", "'-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return", "hass self._name = friendly_name self._enabled_sensor = enabled_sensor self._type = type", "as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME,", "config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s", "self._hass = hass self._fpapi = fpapi() self._name = friendly_name self._interval", "self._departure_table = sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4 update completed for", "interval self._hass = hass self._name = friendly_name self._enabled_sensor = enabled_sensor", "> cacheage or not self._minimization: try: deviationdata = self._si2api.request() deviationdata", "error occured while\" \"updating train location sensor: %s\", e) return", "TL4 API: %s\", e) return else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing", "\"/\" + self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self,", "if self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus' @property def state(self): \"\"\"", "self._lastupdate is '-': return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe", "already made the request in within # the specified interval", "# Attribution and update sensor data. 
newdata['attribution'] = \"Stockholms Lokaltrafik\"", "diff = self.parseDepartureTime(displaytime) if diff < self._timewindow: departures.append({ 'line': linenumber,", "return data.get(key) except: return {} def putCache(self, key, value): try:", "def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return {'type': self._train_type,", "__init__(self, hass): self._hass = hass self._haslapi = haslapi() self._name =", "'-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._datakey):", "innocent credits from dying. cacheage = self._hass.data[DOMAIN][self._datakey] if not cacheage", "if not cacheage or now(self._hass.config.time_zone) \\ - self._interval > cacheage", "DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY", "errorOccured = True else: try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data", "int(s[1]) - (rightnow.hour * 60 + rightnow.minute) if min <", "DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining the configuration schema. 
PLATFORM_SCHEMA =", "+ siteid self._hass = hass self._name = friendly_name self._lines =", "hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status", "self._lastupdate = now(self._hass.config.time_zone) def _update_ri4(self): errorOccured = False _LOGGER.info(\"Starting to", "interval, direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" # The table of", "device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return {'hasl': self._version, 'pyHasl':", "STATE_ON else: val['refresh_enabled'] = STATE_OFF # Set values of the", "< self._timewindow: departures.append({ 'line': linenumber, 'direction': direction, 'departure': displaytime, 'destination':", "return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return '-' @property", "is None or sensor_state.state is STATE_ON: return STATE_ON return STATE_OFF", "= ri4key self._ri4api = ri4api(ri4key, siteid, 60) self._ri4datakey = 'ri2_'", "as cv import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA", "'-' self._interval = interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile =", "time (%s) \", t) return 0 def getCache(self, key): try:", "Entity from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from homeassistant.util import", "name(self): \"\"\"Return the name of the sensor.\"\"\" return self._name @property", "type in self._type: statustype = ('ferry' if type == 'fer'", "self._fpapi = fpapi() self._name = friendly_name self._interval = interval self._enabled_sensor", "= sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLTrainLocationSensor( hass, sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created", "apidata = 
apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated", "self._sensorproperty = sensorproperty self._departure_table = [] self._deviations_table = [] self._direction", "= [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for HASL\")", "if self._lastupdate is not '-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') #", "expected.strftime('%H:%M:%S') return expected # If the sensor should return the", "or 0 displaytime = value['DisplayTime'] or '' destination = value['Destination']", "= 'hasl' # Keys used in the configuration. CONF_RI4_KEY =", "unit of measure. if self._unit_of_measure is not '': val['unit_of_measurement'] =", "vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE,", "'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' #", "conversions. 
\"\"\" try: if t == 'Nu': return 0 s", "%s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if not cacheage or now(self._hass.config.time_zone)", "for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or", "if expected is not '-': expected = \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S')", "hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] = '' self.update = Throttle(interval)(self._update) @property def name(self):", "key): try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close()", "'', 'refresh': '', 'update': '', } if si2key: self._si2key =", "icon for the frontend.\"\"\" return 'mdi:train-car' @property def device_state_attributes(self): \"\"\"", "DOMAIN = 'hasl' # Keys used in the configuration. CONF_RI4_KEY", "= refresh val['next_departure_minutes'] = expected_minutes val['next_departure_time'] = expected_time val['deviation_count'] =", "cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)),", "self._timewindow = timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected = '-' self._lastupdate", "si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status' or \\", "value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table = \\ sorted(deviations, key=lambda k:", "tl2key, friendly_name, enabled_sensor, interval, type, minimization): self._tl2api = tl2api(tl2key) self._datakey", "'-': expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else:", "STATE_ON return STATE_OFF if self._sensorproperty is 'updated': if self._lastupdate is", "'comb': sitekey = sensorconf.get(CONF_SITEID) si2key = config.get(CONF_SI2_KEY) ri4key = 
config.get(CONF_RI4_KEY)", "sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey =", "deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details':", "is updating or not. if self._sensorproperty is 'refresh': if self._enabled_sensor", "1440 return min except Exception: _LOGGER.warning(\"Failed to parse departure time", "\" \"updating RI4 API: %s\", e) errorOccured = True else:", "self._lastupdate = '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if", "@property def device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return self._sensordata", "= True except Exception as e: _LOGGER.error(\"A communication error occured", "t): \"\"\" weird time formats from the API, do some", "newdata = {} # Use some nice translations for the", "updating or not. if self._sensorproperty is 'refresh': if self._enabled_sensor is", "try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() data[key]", "expected_minutes val['next_departure_time'] = expected_time val['deviation_count'] = len(self._deviations_table) return val def", "add_devices, discovery_info=None): \"\"\"Setup the sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN] =", "= hass self._name = friendly_name self._lines = lines self._siteid =", "self._fpapi.request(self._train_type) except HASL_Error as e: _LOGGER.error(\"A communication error occured while", "direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" # The table of resulttypes", "cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list,", "value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) 
==", "self._minimization: try: deviationdata = self._si2api.request() deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata)", "of # requesting it again and spare some innocent credits", "is not '-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the", "= newdata['last_updated'] _LOGGER.info(\"TL2 update completed for %s...\", self._name) class SLDeparturesSensor(Entity):", "config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if sitekey and ri4key: sensorname =", "from cache for %s...\", self._name) except Exception as e: _LOGGER.error(\"A", "TL2 for %s...\", self._name) # Object used to create our", "# -*- coding: utf-8 -*- \"\"\"Simple service for SL (Storstockholms", "%H:%M:%S') # Failsafe return '-' @property def device_state_attributes(self): \"\"\" Return", "or '' destination = value['Destination'] or '' linenumber = value['LineNumber']", "= ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3']", "'refresh', 'updated'] LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC',", "return 'mdi:bus-alert' return 'mdi:bus' @property def state(self): \"\"\" Return number", "si2api(si2key, siteid, '') self._si2datakey = 'si2_' + si2key + '_'", "self._hass.data[DOMAIN][self._ri4datakey] if not cacheage or now(self._hass.config.time_zone) \\ - self._interval >", "e.details) return except Exception as e: _LOGGER.error(\"A error occured while\"", "sensor attributes.\"\"\" return {'type': self._train_type, 'data': json.dumps(self._data)} @property def state(self):", "train_type self._data = {} self.update = Throttle(interval)(self._update) @property def name(self):", "async_track_utc_time_change, track_time_interval) from homeassistant.util import Throttle from homeassistant.util.dt import now", "'-': return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return '-'", "(rightnow.hour * 60 + rightnow.minute) if min < 0: min", "again and spare some innocent credits from dying. 
cacheage =", "data = json.load(jsonFile) jsonFile.close() return data.get(key) except: return {} def", "self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\",", "name of the sensor.\"\"\" return self._name @property def icon(self): \"\"\"", "sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor", "cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta),", "or '' linenumber = value['LineNumber'] or '' expected = value['ExpectedDateTime']", "{ 'min': 'min', 'time': '', 'deviations': '', 'refresh': '', 'update':", "'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if self._enabled_sensor is not None:", "SL (Storstockholms Lokaltrafik).\"\"\" import datetime import json import logging from", "e.details) errorOccured = True if not errorOccured: deviations = []", "homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS,", "Version' self._version = __version__ self._py_version = self._haslapi.version() @property def name(self):", "val['departures'] = self._departure_table val['deviations'] = self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes']", "'2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN = 'hasl' # Keys used", "homeassistant.helpers.entity import Entity from homeassistant.helpers.event import (async_track_point_in_utc_time, async_track_utc_time_change, track_time_interval) from", "self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for 
%s...\", self._name) except", "'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining the", "# Setup the unit of measure. if self._unit_of_measure is not", "expected_time.strftime('%H:%M:%S') else: expected_time = '-' expected_minutes = '-' # Format", "= interval self._hass = hass self._name = friendly_name self._enabled_sensor =", "\\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else: expected_time = '-'", "or '' groupofline = value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype,", "self._hass = hass self._name = friendly_name self._lines = lines self._siteid", "import Throttle from homeassistant.util.dt import now from hasl import (haslapi,", "expected = expected.strftime('%H:%M:%S') return expected # If the sensor should", "True else: try: deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache", "not hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = '' # Setup updating of the", "attributes .\"\"\" # Initialize the state attributes. val = {}", "self._sensorproperty is 'time': if not self._departure_table: return '-' expected =", "minutes to next departure. if self._sensorproperty is 'min': if not", "communication error occured while \" \"updating SI2 sensor: %s\", e.details)", "resulttypes and the corresponding units of measure. 
unit_table = {", "sensor.\"\"\" return self._version + \"/\" + self._py_version class SLStatusSensor(Entity): \"\"\"Trafic", "sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor", "friendly_name, enabled_sensor, interval, type, minimization): self._tl2api = tl2api(tl2key) self._datakey =", "hass.data[DOMAIN][self._si2datakey] = '' # Setup updating of the sensor. self.update", "si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY),", "in enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate':", "else: _LOGGER.error(\"Sensor %s is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class", "vol.Required(CONF_SENSORS, default=[]): vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR):", "measure. if self._unit_of_measure is not '': val['unit_of_measurement'] = self._unit_of_measure #", "departure board.\"\"\" # If using external sensor, get its value.", "except Exception as e: _LOGGER.error(\"A error occured while \" \"updating", "parse departure time (%s) \", t) return 0 def getCache(self,", "return self._name @property def icon(self): \"\"\" Return the icon for", "currently updating or not. if self._enabled_sensor is not None: sensor_state", "the icon for the frontend.\"\"\" return None @property def device_state_attributes(self):", "units of measure. 
unit_table = { 'min': 'min', 'time': '',", "max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]), vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE):", "one SL site.\"\"\" def __init__(self, hass, si2key, ri4key, siteid, lines,", "\"updating train location sensor: %s\", e) return self._data = apidata", "its value. if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor)", "completed for %s...\", self._name) def _update_si2(self): errorOccured = False _LOGGER.info(\"Starting", "interval, type, minimization): self._tl2api = tl2api(tl2key) self._datakey = 'tl2_' +", "attributes.\"\"\" return {'hasl': self._version, 'pyHasl': self._py_version} @property def state(self): \"\"\"", "return refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return '-' @property def device_state_attributes(self):", "'' groupofline = value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car')", "the state attributes. 
val = {} # Format the next", "version sensor for HASL\") for sensorconf in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE]", "try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() return", "'mdi:bus-alert' return 'mdi:bus' @property def state(self): \"\"\" Return number of", "sensor_state.state \\ is STATE_ON: self._update_ri4() if self._si2key: self._update_si2() self._lastupdate =", "occured while \" \"updating TL4 API: %s\", e) return else:", "data from cache for %s...\", self._name) except Exception as e:", "sensorname, train_type, sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor %s...\", sensorname)", "= friendly_name self._interval = interval self._enabled_sensor = enabled_sensor self._train_type =", "DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train',", "= open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): if self._enabled_sensor is", "\", t) return 0 def getCache(self, key): try: jsonFile =", "cacheage = self._hass.data[DOMAIN][self._datakey] if not cacheage or now(self._hass.config.time_zone) \\ -", "= self.parseDepartureTime(displaytime) if diff < self._timewindow: departures.append({ 'line': linenumber, 'direction':", "= enabled_sensor self._train_type = train_type self._data = {} self.update =", "the next exptected time. if self._departure_table: expected_time = self._departure_table[0]['expected'] or", "Return the sensor attributes.\"\"\" return {'type': self._train_type, 'data': json.dumps(self._data)} @property", "from homeassistant.util.dt import now from hasl import (haslapi, fpapi, tl2api,", "newdata[statustype + '_events'] = response['Events'] # Attribution and update sensor", "configuration. 
DEFAULT_INTERVAL = timedelta(minutes=10) DEFAULT_TIMEWINDOW = 30 DEFAULT_DIRECTION = 0", "self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board for one SL site.\"\"\" def", "'tl2'] LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated'] LIST_TRAIN_TYPES =", "@property def icon(self): \"\"\" Return the icon for the frontend.\"\"\"", "fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0'", "= timewindow self._nextdeparture_minutes = '0' self._nextdeparture_expected = '-' self._lastupdate =", "+ '_' + siteid self._ri4key = ri4key self._ri4api = ri4api(ri4key,", "nice translations for the statuses etc. statuses = { 'EventGood':", "return self._version + \"/\" + self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation", "HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN =", "'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for", "made the request in within # the specified interval then", "e) errorOccured = True if not errorOccured: departures = []", "self._lastupdate = '-' self._interval = interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min')", "_update(self): \"\"\"Get the departure board.\"\"\" # If using external sensor,", "number of minutes to the next departure \"\"\" # If", "next departure occurs. if self._sensorproperty is 'time': if not self._departure_table:", "__version__ self._py_version = self._haslapi.version() @property def name(self): \"\"\"Return the name", "= { 'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train',", "# Object used to create our object. 
newdata = {}", "= 'sensor' CONF_TIMEWINDOW = 'timewindow' CONF_SENSORPROPERTY = 'property' CONF_TRAIN_TYPE =", "> 1 and s[1] == 'min': return int(s[0]) s =", "vol.All(cv.ensure_list, [vol.All({ vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL,", "refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the unit of measure.", "sensorconf in config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\ sensorconf[CONF_SENSOR_TYPE]", "= hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._ri4datakey): hass.data[DOMAIN][self._ri4datakey] =", "sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created status sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor", ")) _LOGGER.info(\"Created status sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is", "tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type = sensorconf.get(CONF_TRAIN_TYPE)", "completed for %s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board for one", "self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor) # If we", "apidata = self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] =", "and s[1] == 'min': return int(s[0]) s = t.split(':') if", "the departure board.\"\"\" # If using external sensor, get its", "to update SI2 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if", "'mdi:bus' @property def state(self): \"\"\" Return number of minutes to", "requesting it again and spare some innocent credits from dying.", "%s is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if 
sensorconf[CONF_SENSOR_TYPE] == 'trainlocation':", "%s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME])", "%s...\", self._name) # Object used to create our object. newdata", "enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime = value['DisplayTime'] or", "if sensor is currently updating or not. if self._enabled_sensor is", "deviationdata = self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name)", "config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for HASL\") for sensorconf in", "%s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure board for one SL site.\"\"\"", "= [] self._lastupdate = '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization =", "try: departuredata = self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey]", "self._py_version class SLStatusSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self, hass, tl2key,", "* 60 + rightnow.minute) if min < 0: min =", "API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string, vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False):", "min except Exception: _LOGGER.warning(\"Failed to parse departure time (%s) \",", "\\ is STATE_ON: self._update_ri4() if self._si2key: self._update_si2() self._lastupdate = now(self._hass.config.time_zone)", "self._lastupdate if self._lastupdate is not '-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S')", "'update': '', } if si2key: self._si2key = si2key self._si2api =", "'departures' or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey = sensorconf.get(CONF_SITEID) si2key", "instead of # requesting it again and spare some innocent", "train_type, 
sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_ENABLED_SENSOR), )) _LOGGER.info(\"Created train sensor %s...\", sensorname) else:", "try: apidata = self._fpapi.request(self._train_type) except HASL_Error as e: _LOGGER.error(\"A communication", "sensorconf[CONF_SENSOR_TYPE] == 'status' or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key =", "'' expected = value['ExpectedDateTime'] or '' groupofline = value['GroupOfLine'] or", "= Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name of the", "not None: sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or", "device_state_attributes(self): \"\"\" Return the sensor attributes.\"\"\" return self._sensordata @property def", "frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus' @property def state(self):", "'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', } for (i, traffictype) in", "HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER = logging.getLogger(__name__) DOMAIN = 'hasl'", "if not self._departure_table: return '-' return self._departure_table[0]['time'] # If the", "self._name) class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def __init__(self, hass): self._hass", "errorOccured: departures = [] iconswitcher = { 'Buses': 'mdi:bus', 'Trams':", "the statuses etc. 
statuses = { 'EventGood': 'Good', 'EventMinor': 'Minor',", "within # the specified interval then use that data instead", "open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close() data[key] = value except:", "#!/usr/bin/python # -*- coding: utf-8 -*- \"\"\"Simple service for SL", "= config.get(CONF_SI2_KEY) ri4key = config.get(CONF_RI4_KEY) if sitekey and ri4key: sensorname", "as e: _LOGGER.error(\"A error occured while\" \"updating train location sensor:", "+ '_status_icon'] = \\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] = \\", "hasl import (haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error)", "destination, 'time': diff, 'expected': expected, 'type': traffictype, 'groupofline': groupofline, 'icon':", "Return the icon for the frontend.\"\"\" return 'mdi:train-car' @property def", "Icon table used for HomeAssistant. statusIcons = { 'EventGood': 'mdi:check',", "sensor.\"\"\" return self._lastupdate def getCache(self, key): try: jsonFile = open(self._cachefile,", "hass.data[DOMAIN].get(self._si2datakey): hass.data[DOMAIN][self._si2datakey] = '' # Setup updating of the sensor.", "the icon for the frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert' return", "# Return only the relevant portion of the results. for", "cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int),", "or not. if self._enabled_sensor is not None: sensor_state = self._hass.states.get(self._enabled_sensor)", "state(self): \"\"\" Return number of minutes to the next departure", "= 'traffic_class' CONF_VERSION = 'version_sensor' CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES =", "CONF_USE_MINIMIZATION = 'api_minimization' LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2']", "departure occurs. 
if self._sensorproperty is 'time': if not self._departure_table: return", "location sensor: %s\", e) return self._data = apidata _LOGGER.info(\"Update completed", "vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]),", "is STATE_ON: val['refresh_enabled'] = STATE_ON else: val['refresh_enabled'] = STATE_OFF #", "value) in enumerate(departuredata[traffictype]): direction = value['JourneyDirection'] or 0 displaytime =", "lines, friendly_name, enabled_sensor, interval, direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" #", "get its value. if self._enabled_sensor is not None: sensor_state =", "fpapi() self._name = friendly_name self._interval = interval self._enabled_sensor = enabled_sensor", "1: rightnow = now(self._hass.config.time_zone) min = int(s[0]) * 60 +", "'%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S') else: expected_time = '-' expected_minutes =", "key=lambda k: k['time']) _LOGGER.info(\"RI4 update completed for %s...\", self._name) def", "import homeassistant.helpers.config_validation as cv import voluptuous as vol from homeassistant.components.sensor", "= self._departure_table[0]['time'] or '-' if expected_time is not '-': expected_time", "external sensor or it is ON then proceed. 
if self._enabled_sensor", "service for SL (Storstockholms Lokaltrafik).\"\"\" import datetime import json import", "+ '_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype + '_status_icon'] = \\", "== [] or linenumber \\ in self._lines: diff = self.parseDepartureTime(displaytime)", "is None or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to update TL2", "expected, 'type': traffictype, 'groupofline': groupofline, 'icon': icon, }) self._departure_table =", "sensorconf.get(CONF_DIRECTION), sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor %s...\", sensorname)", "Throttle from homeassistant.util.dt import now from hasl import (haslapi, fpapi,", "board for one SL site.\"\"\" def __init__(self, hass, si2key, ri4key,", "while \" \"updating SI2 sensor: %s\", e) errorOccured = True", "lines self._siteid = siteid self._enabled_sensor = enabled_sensor self._sensorproperty = sensorproperty", "newdata['last_updated'] _LOGGER.info(\"TL2 update completed for %s...\", self._name) class SLDeparturesSensor(Entity): \"\"\"Departure", "+ '': value} jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def", "occured while \" \"updating SI2 sensor: %s\", e.details) errorOccured =", "LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2',", "if self._enabled_sensor is None or sensor_state.state is STATE_ON: try: apidata", "= iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) == 0 or int(direction) \\", "friendly_name self._interval = interval self._enabled_sensor = enabled_sensor self._train_type = train_type", "\" \"updating SI2 sensor: %s\", e) errorOccured = True else:", "= self._fpapi.request(self._train_type) except HASL_Error as e: _LOGGER.error(\"A communication error occured", "int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60 +", "= statuses.get(event['StatusIcon']) 
event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] =", "newdata[statustype + '_status_icon'] = \\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] =", "type self._sensordata = [] self._lastupdate = '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)", "return minutes to next departure. if self._sensorproperty is 'min': if", "# The table of resulttypes and the corresponding units of", "'direction': direction, 'departure': displaytime, 'destination': destination, 'time': diff, 'expected': expected,", "= True except Exception as e: _LOGGER.error(\"A error occured while", "completed %s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def __init__(self,", "track_time_interval) from homeassistant.util import Throttle from homeassistant.util.dt import now from", "= [] self._deviations_table = [] self._direction = direction self._timewindow =", "the sensor should return the number of deviations. if self._sensorproperty", "jsonFile = open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): \"\"\"Get the", "'' icon = iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction) == 0 or", "# Initialize the state attributes. val = {} # Format", "vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE): vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices,", "return int(s[0]) s = t.split(':') if len(s) > 1: rightnow", "def getCache(self, key): try: jsonFile = open(self._cachefile, 'r') data =", "= json.load(jsonFile) jsonFile.close() return data.get(key) except: return {} def putCache(self,", "If the sensor should return minutes to next departure. if", "else: try: departuredata = self.getCache(self._ri4datakey) _LOGGER.info(\"Reusing data from cache for", "\\ sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2 update completed for %s...\",", "the configuration. 
CONF_RI4_KEY = 'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY =", "self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if not cacheage or now(self._hass.config.time_zone) \\", "of the results. for response in apidata: type = response['Type']", "= expected_minutes val['next_departure_time'] = expected_time val['deviation_count'] = len(self._deviations_table) return val", "# Format the next exptected time. if self._departure_table: expected_time =", "1 and s[1] == 'min': return int(s[0]) s = t.split(':')", "sensorname) else: _LOGGER.error(\"Sensor %s is missing site, si2key or ri4key\",", "'mdi:bus', 'Trams': 'mdi:tram', 'Ships': 'mdi:ferry', 'Metros': 'mdi:subway-variant', 'Trains': 'mdi:train', }", "communication error occured while \" \"updating TL2 sensor: %s\", e.details)", "icon for the frontend.\"\"\" return None @property def device_state_attributes(self): \"\"\"", "Defining the configuration schema. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API Keys", "%s is missing train_type attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic", "the time at which next departure occurs. if self._sensorproperty is", "== 'departures' or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb': sitekey = sensorconf.get(CONF_SITEID)", "RI4 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._ri4datakey] if not cacheage", "is not None: sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None", "object. 
newdata = {} # Use some nice translations for", "if not errorOccured: deviations = [] for (idx, value) in", "self._hass.states.get(self._enabled_sensor) # If we dont have external sensor or it", "occured while \" \"updating TL2 sensor: %s\", e.details) return except", "if si2key: self._si2key = si2key self._si2api = si2api(si2key, siteid, '')", "self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey]", "= interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization", "jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): \"\"\"Get the departure board.\"\"\" # If", "sensorname) else: _LOGGER.error(\"Sensor %s is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if", "None or type in self._type: statustype = ('ferry' if type", "'mdi:subway-variant', 'Trains': 'mdi:train', } for (i, traffictype) in enumerate(['Metros', 'Buses',", "\"\"\"Departure board for one SL site.\"\"\" def __init__(self, hass, si2key,", "= value['GroupOfLine'] or '' icon = iconswitcher.get(traffictype, 'mdi:train-car') if int(self._direction)", "# requesting it again and spare some innocent credits from", "ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status' or \\ sensorconf[CONF_SENSOR_TYPE] ==", "not self._minimization: try: apidata = self._tl2api.request() apidata = apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey,", "unit_table.get(self._sensorproperty, 'min') self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not", "= 'ri4key' CONF_SI2_KEY = 'si2key' CONF_TL2_KEY = 'tl2key' CONF_SITEID =", "60) self._ri4datakey = 'ri2_' + ri4key + '_' + siteid", "is not '': val['unit_of_measurement'] = self._unit_of_measure # Check if sensor", "\"cached SI2 sensor: %s\", e.details) errorOccured 
= True if not", "self.update = Throttle(interval)(self._update) @property def name(self): \"\"\"Return the name of", "e.details) errorOccured = True except Exception as e: _LOGGER.error(\"A communication", "while \" \"updating TL2 sensor: %s\", e.details) return except Exception", "= t.split(':') if len(s) > 1: rightnow = now(self._hass.config.time_zone) min", "\" \"updating train location sensor: %s\", e.details) return except Exception", "event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] = response['Events'] #", "self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state is STATE_ON: val['refresh_enabled']", "if self._sensorproperty is 'deviations': return len(self._deviations_table) # If the sensor", "jsonFile.close() data[key] = value except: data = {'' + key", "have external sensor or it is ON then proceed. if", "= 'siteid' CONF_LINES = 'lines' CONF_DIRECTION = 'direction' CONF_ENABLED_SENSOR =", "'icon': icon, }) self._departure_table = sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4", "data.get(key) except: return {} def putCache(self, key, value): try: jsonFile", "si2key self._si2api = si2api(si2key, siteid, '') self._si2datakey = 'si2_' +", "sensorconf[ATTR_FRIENDLY_NAME] sensors.append(SLDeparturesSensor( hass, si2key, ri4key, sitekey, sensorconf.get(CONF_LINES), sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL),", "not None: sensor_state = self._hass.states.get(self._enabled_sensor) # If we dont have", "if self._lines == [] or linenumber \\ in self._lines: diff", "apidata['ResponseData']['TrafficTypes'] self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for", "default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): 
vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string,", "Format the next exptected time. if self._departure_table: expected_time = self._departure_table[0]['expected']", "def _update_si2(self): errorOccured = False _LOGGER.info(\"Starting to update SI2 for", "API, do some quick and dirty conversions. \"\"\" try: if", "self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...',", "now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...', self._name) except HASL_Error as e:", "specified interval then use that data instead of # requesting", "'expected': expected, 'type': traffictype, 'groupofline': groupofline, 'icon': icon, }) self._departure_table", "%s\", e) return else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data from", "vol.Optional(CONF_TL2_KEY): cv.string, vol.Optional(CONF_VERSION, default=False): cv.boolean, vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean, vol.Required(CONF_SENSORS, default=[]):", "e: _LOGGER.error(\"A communication error occured while \" \"updating SI2 sensor:", "} # Icon table used for HomeAssistant. 
statusIcons = {", "CONF_SENSORS, STATE_OFF, STATE_ON) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import", "Return the sensor attributes .\"\"\" # Initialize the state attributes.", "vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW): vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),", "hass, tl2key, friendly_name, enabled_sensor, interval, type, minimization): self._tl2api = tl2api(tl2key)", "self._sensordata = [] self._lastupdate = '-' self._cachefile = hass.config.path(DEFAULT_CACHE_FILE) self._minimization", "= sensorproperty self._departure_table = [] self._deviations_table = [] self._direction =", "else: expected_time = '-' expected_minutes = '-' # Format the", "True except Exception as e: _LOGGER.error(\"A error occured while \"", "return except Exception as e: _LOGGER.error(\"A error occured while\" \"updating", "= train_type self._data = {} self.update = Throttle(interval)(self._update) @property def", "the request in within # the specified interval then use", "the frontend.\"\"\" if self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus' @property def", "DEFAULT_DIRECTION = 0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS", "sensor.\"\"\" return self._name @property def icon(self): \"\"\" Return the icon", "STATE_OFF if self._sensorproperty is 'updated': if self._lastupdate is '-': return", "sensors.append(SLStatusSensor( hass, tl2key, sensorname, sensorconf.get(CONF_ENABLED_SENSOR), sensorconf.get(CONF_SCAN_INTERVAL), sensorconf.get(CONF_TRAFFIC_CLASS), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created", "= 0 DEFAULT_SENSORPROPERTY = 'min' DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS =", "apidata: type = response['Type'] if self._type is None or type", "'ferry': 'mdi:ferry', 'bus': 'mdi:bus', 'tram': 'mdi:tram', 'train': 'mdi:train', 'local': 
'mdi:train-variant',", "Lokaltrafik).\"\"\" import datetime import json import logging from datetime import", "self.putCache(self._datakey, apidata) self._hass.data[DOMAIN][self._datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\",", "update SI2 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey] if not", "vol.In(LIST_TRAIN_TYPES) })]), }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Setup", "the last refresh time. refresh = self._lastupdate if self._lastupdate is", "newdata['last_updated'] = \\ self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' + '%H:%M:%S') self._sensordata = newdata self._lastupdate", "value except: data = {'' + key + '': value}", "\"\"\" Return the state of the sensor.\"\"\" return self._version +", "or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key = config.get(CONF_TL2_KEY) if tl2key:", "the icon for the frontend.\"\"\" return 'mdi:train-car' @property def device_state_attributes(self):", "self._lines == [] or linenumber \\ in self._lines: diff =", "tl2api(tl2key) self._datakey = 'tl2_' + tl2key self._interval = interval self._hass", "train sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing train_type", "%s\", e) errorOccured = True else: try: deviationdata = self.getCache(self._si2datakey)", "= self.getCache(self._si2datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) except", "same API have already made the request in within #", "utf-8 -*- \"\"\"Simple service for SL (Storstockholms Lokaltrafik).\"\"\" import datetime", "except Exception: _LOGGER.warning(\"Failed to parse departure time (%s) \", t)", "refresh.strftime('%Y-%m-%d %H:%M:%S') # Failsafe return '-' @property def device_state_attributes(self): \"\"\"", "[] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor for HASL\") for", 
"datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return expected # If the", "it is updating or not. if self._sensorproperty is 'refresh': if", "return the time at which next departure occurs. if self._sensorproperty", "value): try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile) jsonFile.close()", "min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour *", "'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons = { 'ferry':", "self._sensordata = newdata self._lastupdate = newdata['last_updated'] _LOGGER.info(\"TL2 update completed for", "if self._sensorproperty is 'refresh': if self._enabled_sensor is None or sensor_state.state", "'EventMinor': 'mdi:clock-alert-outline', 'EventMajor': 'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons = {", "json.load(jsonFile) jsonFile.close() return data.get(key) except: return {} def putCache(self, key,", "from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE,", "[] self._deviations_table = [] self._direction = direction self._timewindow = timewindow", "if self._lastupdate is '-': return '-' return refresh.strftime('%Y-%m-%d %H:%M:%S') #", "'mdi:close', 'EventPlanned': 'mdi:triangle-outline' } trafficTypeIcons = { 'ferry': 'mdi:ferry', 'bus':", "sensorconf.get(CONF_TIMEWINDOW), sensorconf.get(CONF_SENSORPROPERTY), config.get(CONF_USE_MINIMIZATION) )) _LOGGER.info(\"Created departures sensor %s...\", sensorname) else:", "default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION): vol.All(vol.Coerce(int), vol.Range(min=0, max=2)), vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW):", "= self._si2api.request() deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) 
self._hass.data[DOMAIN][self._si2datakey] = \\", "device_state_attributes(self): \"\"\" Return the sensor attributes .\"\"\" # Initialize the", "enumerate(['Metros', 'Buses', 'Trains', 'Trams', 'Ships']): for (idx, value) in enumerate(departuredata[traffictype]):", "%s...\", self._name) # Return only the relevant portion of the", "return self._departure_table[0]['time'] # If the sensor should return the time", "table used for HomeAssistant. statusIcons = { 'EventGood': 'mdi:check', 'EventMinor':", "icon(self): \"\"\" Return the icon for the frontend.\"\"\" return 'mdi:train-car'", "{} sensors = [] if config[CONF_VERSION]: sensors.append(SLVersionSensor(hass)) _LOGGER.info(\"Created version sensor", "'status' or \\ sensorconf[CONF_SENSOR_TYPE] == 'tl2': tl2key = config.get(CONF_TL2_KEY) if", "== 'fer' else type) newdata[statustype + '_status'] = \\ statuses.get(response['StatusIcon'])", "PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # API Keys vol.Optional(CONF_RI4_KEY): cv.string, vol.Optional(CONF_SI2_KEY): cv.string,", "self._enabled_sensor is None or sensor_state.state is STATE_ON: val['refresh_enabled'] = STATE_ON", "if self._enabled_sensor is None or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to", "except Exception as e: _LOGGER.error(\"A error occured while\" \"updating train", "self._unit_of_measure is not '': val['unit_of_measurement'] = self._unit_of_measure # Check if", "= hass.config.path(DEFAULT_CACHE_FILE) self._minimization = minimization if not hass.data[DOMAIN].get(self._datakey): hass.data[DOMAIN][self._datakey] =", "+ rightnow.minute) if min < 0: min = min +", "'local', 'tram', 'bus', 'fer'] DEFAULT_SENSORTYPE = 'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json'", "or sensor_state.state \\ is STATE_ON: self._update_ri4() if self._si2key: self._update_si2() self._lastupdate", "vol.Required(ATTR_FRIENDLY_NAME): cv.string, vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE): vol.In(LIST_SENSOR_TYPES), 
vol.Optional(CONF_ENABLED_SENSOR): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.Any(cv.time_period,", "False _LOGGER.info(\"Starting to update SI2 for %s...\", self._name) cacheage =", "table of resulttypes and the corresponding units of measure. unit_table", "If using external sensor, get its value. if self._enabled_sensor is", "occured while\" \"updating train location sensor: %s\", e) return self._data", "haslapi() self._name = 'HASL Version' self._version = __version__ self._py_version =", "state attributes. val = {} # Format the next exptected", "\\ == int(self._direction): if self._lines == [] or linenumber \\", "not '-': expected_time = \\ datetime.datetime.strptime(expected_time, '%Y-%m-%dT%H:%M:%S') expected_time = expected_time.strftime('%H:%M:%S')", "return else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache for", "sensor should return minutes to next departure. if self._sensorproperty is", "DEFAULT_TRAIN_TYPE = 'PT' DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus',", "if self._departure_table: expected_time = self._departure_table[0]['expected'] or '-' expected_minutes = self._departure_table[0]['time']", "displaytime = value['DisplayTime'] or '' destination = value['Destination'] or ''", "%s\", e) errorOccured = True else: try: departuredata = self.getCache(self._ri4datakey)", "default=DEFAULT_INTERVAL): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(CONF_SITEID): cv.string, vol.Optional(CONF_LINES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_DIRECTION,", "= self.getCache(self._datakey) _LOGGER.info(\"Reusing data from cache for %s...\", self._name) #", "'details': value['Details'], 'sortOrder': value['SortOrder'], }) self._deviations_table = \\ sorted(deviations, key=lambda", "def __init__(self, hass, si2key, ri4key, siteid, lines, friendly_name, enabled_sensor, interval,", "'pyHasl': self._py_version} @property def state(self): \"\"\" 
Return the state of", "state of the sensor.\"\"\" return self._version + \"/\" + self._py_version", "sensor_state = self._hass.states.get(self._enabled_sensor) if self._enabled_sensor is None or sensor_state.state is", "%s...\", self._name) class SLVersionSensor(Entity): \"\"\"HASL Version Sensor.\"\"\" def __init__(self, hass):", "value['SortOrder'], }) self._deviations_table = \\ sorted(deviations, key=lambda k: k['sortOrder']) _LOGGER.info(\"SI2", "# If we dont have external sensor or it is", "API: %s\", e) return else: apidata = self.getCache(self._datakey) _LOGGER.info(\"Reusing data", "'EventMajor': 'Closed', 'EventPlanned': 'Planned', } # Icon table used for", "'deviations': return len(self._deviations_table) # If the sensor should return if", "e: _LOGGER.error(\"A error occured while \" \"updating TL4 API: %s\",", "e.details) return except Exception as e: _LOGGER.error(\"A error occured while", "= self._hass.states.get(self._enabled_sensor) # If we dont have external sensor or", "state(self): \"\"\" Return the state of the sensor.\"\"\" return self._version", "= \\ now(self._hass.config.time_zone) _LOGGER.info(\"Updated cache for %s...\", self._name) except HASL_Error", "= False _LOGGER.info(\"Starting to update SI2 for %s...\", self._name) cacheage", "for one SL site.\"\"\" def __init__(self, hass, si2key, ri4key, siteid,", "return self._train_type def _update(self): if self._enabled_sensor is not None: sensor_state", "%s\", e.details) return except Exception as e: _LOGGER.error(\"A error occured", "= open(self._cachefile, 'w') jsonFile.write(json.dumps(data)) jsonFile.close() def _update(self): \"\"\"Get the departure", "dying. 
cacheage = self._hass.data[DOMAIN][self._datakey] if not cacheage or now(self._hass.config.time_zone) \\", "= self._lastupdate if self._lastupdate is not '-': refresh = refresh.strftime('%Y-%m-%d", "attribute\", sensorconf[ATTR_FRIENDLY_NAME]) add_devices(sensors) class SLTrainLocationSensor(Entity): \"\"\"Trafic Situation Sensor.\"\"\" def __init__(self,", "the relevant portion of the results. for response in apidata:", "_LOGGER.error(\"A communication error occured while \" \"updating RI4 API: %s\",", "self._py_version} @property def state(self): \"\"\" Return the state of the", "= now(self._hass.config.time_zone) min = int(s[0]) * 60 + int(s[1]) -", "is None or sensor_state.state \\ is STATE_ON: self._update_ri4() if self._si2key:", "enumerate(deviationdata): deviations.append({ 'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'],", "_LOGGER.info(\"Starting to update SI2 for %s...\", self._name) cacheage = self._hass.data[DOMAIN][self._si2datakey]", "STATE_OFF, STATE_ON) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import (async_track_point_in_utc_time,", "some innocent credits from dying. cacheage = self._hass.data[DOMAIN][self._datakey] if not", "'-' expected_minutes = '-' # Format the last refresh time.", "getCache(self, key): try: jsonFile = open(self._cachefile, 'r') data = json.load(jsonFile)", "self._departure_table: return '-' return self._departure_table[0]['time'] # If the sensor should", "'departures' DEFAULT_CACHE_FILE = '.storage/haslcache.json' # Defining the configuration schema. PLATFORM_SCHEMA", "config[CONF_SENSORS]: if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \\ sensorconf[CONF_SENSOR_TYPE] == 'comb':", "missing site, si2key or ri4key\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'status'", "diff, 'expected': expected, 'type': traffictype, 'groupofline': groupofline, 'icon': icon, })", "last refresh time. 
refresh = self._lastupdate if self._lastupdate is not", "return STATE_OFF if self._sensorproperty is 'updated': if self._lastupdate is '-':", "sensor %s...\", sensorname) else: _LOGGER.error(\"Sensor %s is missing train_type attribute\",", "logging.getLogger(__name__) DOMAIN = 'hasl' # Keys used in the configuration.", "'_status_icon'] = \\ statusIcons.get(response['StatusIcon']) newdata[statustype + '_icon'] = \\ trafficTypeIcons.get(statustype)", "k: k['time']) _LOGGER.info(\"RI4 update completed for %s...\", self._name) def _update_si2(self):", "self._interval > cacheage or not self._minimization: try: deviationdata = self._si2api.request()", "= \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for %s...', self._name) except HASL_Error", "vol.All(vol.Coerce(int), vol.Range(min=0, max=60)), vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY): vol.In(LIST_SENSOR_PROPERTIES), vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS): vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]),", "def icon(self): \"\"\" Return the icon for the frontend.\"\"\" return", "+ '_events'] = response['Events'] # Attribution and update sensor data.", "= '-' self._interval = interval self._unit_of_measure = unit_table.get(self._sensorproperty, 'min') self._cachefile", "= \\ datetime.datetime.strptime(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S') expected = expected.strftime('%H:%M:%S') return expected #", "} if si2key: self._si2key = si2key self._si2api = si2api(si2key, siteid,", "return len(self._deviations_table) # If the sensor should return if it", "of the sensor. val['attribution'] = 'Stockholms Lokaltrafik' val['departures'] = self._departure_table", "icon, }) self._departure_table = sorted(departures, key=lambda k: k['time']) _LOGGER.info(\"RI4 update", "'fer' else type) newdata[statustype + '_status'] = \\ statuses.get(response['StatusIcon']) newdata[statustype", "should return if it is updating or not. 
if self._sensorproperty", "False _LOGGER.info(\"Starting to update RI4 for %s...\", self._name) cacheage =", "the sensors.\"\"\" if not hass.data.get(DOMAIN): hass.data[DOMAIN] = {} sensors =", "'updated': value['Updated'], 'title': value['Header'], 'fromDate': value['FromDateTime'], 'toDate': value['UpToDateTime'], 'details': value['Details'],", "for %s...\", self._name) # Object used to create our object.", "event in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon'])", "e: _LOGGER.error(\"A error occured while retreiving \" \"cached SI2 sensor:", "self._tl2api = tl2api(tl2key) self._datakey = 'tl2_' + tl2key self._interval =", "= STATE_ON else: val['refresh_enabled'] = STATE_OFF # Set values of", "None or sensor_state.state is STATE_ON: _LOGGER.info(\"Starting to update TL2 for", "= self._departure_table val['deviations'] = self._deviations_table val['last_refresh'] = refresh val['next_departure_minutes'] =", "self._ri4api.request() departuredata = departuredata['ResponseData'] self.putCache(self._ri4datakey, departuredata) self._hass.data[DOMAIN][self._ri4datakey] = \\ now(self._hass.config.time_zone)", "= value['Destination'] or '' linenumber = value['LineNumber'] or '' expected", "deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey] = \\ now(self._hass.config.time_zone) _LOGGER.info('Updated cache for", "= \\ statusIcons.get(event['StatusIcon']) newdata[statustype + '_events'] = response['Events'] # Attribution", "communication error occured while \" \"updating RI4 API: %s\", e)", "sensorproperty self._departure_table = [] self._deviations_table = [] self._direction = direction", "try: deviationdata = self._si2api.request() deviationdata = deviationdata['ResponseData'] self.putCache(self._si2datakey, deviationdata) self._hass.data[DOMAIN][self._si2datakey]", "def name(self): \"\"\"Return the name of the 
sensor.\"\"\" return self._name", "'', } if si2key: self._si2key = si2key self._si2api = si2api(si2key,", "e.details) errorOccured = True except Exception as e: _LOGGER.error(\"A error", "= expected_time val['deviation_count'] = len(self._deviations_table) return val def parseDepartureTime(self, t):", "self._si2datakey = 'si2_' + si2key + '_' + siteid self._ri4key", "is missing tl2key attribute\", sensorconf[ATTR_FRIENDLY_NAME]) if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation': train_type", "friendly_name, enabled_sensor, interval, direction, timewindow, sensorproperty, minimization): \"\"\"Initialize\"\"\" # The", "in response['Events']: event['Status'] = statuses.get(event['StatusIcon']) event['StatusIcon'] = \\ statusIcons.get(event['StatusIcon']) newdata[statustype", "the number of deviations. if self._sensorproperty is 'deviations': return len(self._deviations_table)", "ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error) __version__ = '2.2.0' _LOGGER =", "except Exception as e: _LOGGER.error(\"A communication error occured while \"", "'min', 'time': '', 'deviations': '', 'refresh': '', 'update': '', }", "def _update(self): \"\"\"Get the departure board.\"\"\" # If using external", "= value['LineNumber'] or '' expected = value['ExpectedDateTime'] or '' groupofline", "= siteid self._enabled_sensor = enabled_sensor self._sensorproperty = sensorproperty self._departure_table =", "not '-': refresh = refresh.strftime('%Y-%m-%d %H:%M:%S') # Setup the unit", "Exception: _LOGGER.warning(\"Failed to parse departure time (%s) \", t) return", "self._deviations_table: return 'mdi:bus-alert' return 'mdi:bus' @property def state(self): \"\"\" Return", "SI2 sensor: %s\", e.details) errorOccured = True except Exception as", "from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF, STATE_ON) from", "siteid self._enabled_sensor = enabled_sensor self._sensorproperty = sensorproperty self._departure_table = []" ]
[ "query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from urllib2 import urlopen def", "the value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\"", "urlopen( simbad_uri % host.replace(' ','%20') ).read() resred = re.search( regex_redshift,", "redshift is not given for SN, attempts to resolve link", "regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri % name.replace(' ','%20')", "citation = None try: host = reshost.group().split('{')[1].split('}')[0] except AttributeError: host", "host galaxy and report its redshift. Returns ( (ra,dec), redshift,", "urlopen( simbad_uri % name.replace(' ','%20') ).read() rescoords = re.search( regex_coords,", ") reshost = re.search( regex_host, result ) try: cs =", "with searching simbad for info about a SN and parsing", "host galaxy. If redshift is not given for SN, attempts", "parsing the results. Author: <NAME>, <EMAIL>, 2014 example SIMBAD uri", "if (redshift == None) and (host != None): # get", "\"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift =", "galaxy and report its redshift. Returns ( (ra,dec), redshift, host_name,", "\"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri % name.replace('", "')[0]) citation = resred.group().split(' ')[-1] except AttributeError: pass return ((ra,dec),", "for SN, attempts to resolve link to host galaxy and", "regex_coords, result ) resred = re.search( regex_redshift, result ) reshost", "def get_SN_info( name ): \"\"\" Queries simbad for SN coords,", "quick library to deal with searching simbad for info about", "\"\"\" import re from urllib2 import urlopen def get_SN_info( name", "results. 
Author: <NAME>, <EMAIL>, 2014 example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S", "<EMAIL>, 2014 example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re", "the results. Author: <NAME>, <EMAIL>, 2014 example SIMBAD uri query:", "regex_redshift, result ) reshost = re.search( regex_host, result ) try:", "try: host = reshost.group().split('{')[1].split('}')[0] except AttributeError: host = None if", "= resred.group().split(' ')[-1] except AttributeError: pass return ((ra,dec), redshift, host,", "SN coords, redshift, and host galaxy. If redshift is not", "')[-1] except AttributeError: redshift = None citation = None try:", "redshift, host_name, redshift_citation ), with values of None inserted whenever", "None,None try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split('", "deal with searching simbad for info about a SN and", "\"\"\" Queries simbad for SN coords, redshift, and host galaxy.", "reshost = re.search( regex_host, result ) try: cs = rescoords.group().split(':')[1].strip()", "dec = cs[12:].strip() except: ra,dec = None,None try: redshift =", "= re.search( regex_coords, result ) resred = re.search( regex_redshift, result", ".+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen(", "float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split(' ')[-1] except AttributeError: pass", "AttributeError: host = None if (redshift == None) and (host", "host_name, redshift_citation ), with values of None inserted whenever it", "None) and (host != None): # get the redshift from", "Author: <NAME>, <EMAIL>, 2014 example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\"", "(redshift == None) and (host != None): # get the", "= urlopen( simbad_uri % 
host.replace(' ','%20') ).read() resred = re.search(", "is not given for SN, attempts to resolve link to", "float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split(' ')[-1] except AttributeError: redshift", "with values of None inserted whenever it cannot resolve the", "and parsing the results. Author: <NAME>, <EMAIL>, 2014 example SIMBAD", "info about a SN and parsing the results. Author: <NAME>,", "get_SN_info( name ): \"\"\" Queries simbad for SN coords, redshift,", "rescoords = re.search( regex_coords, result ) resred = re.search( regex_redshift,", "citation = resred.group().split(' ')[-1] except AttributeError: redshift = None citation", "')[0]) citation = resred.group().split(' ')[-1] except AttributeError: redshift = None", "redshift_citation ), with values of None inserted whenever it cannot", "inserted whenever it cannot resolve the value. \"\"\" simbad_uri =", "host galaxy result = urlopen( simbad_uri % host.replace(' ','%20') ).read()", "If redshift is not given for SN, attempts to resolve", "regex_redshift, result ) try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation", "citation = resred.group().split(' ')[-1] except AttributeError: pass return ((ra,dec), redshift,", "simbad for SN coords, redshift, and host galaxy. 
If redshift", "resred.group().split(' ')[-1] except AttributeError: redshift = None citation = None", "): \"\"\" Queries simbad for SN coords, redshift, and host", "redshift from the host galaxy result = urlopen( simbad_uri %", ") try: cs = rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec =", "\"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result =", "try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split(' ')[-1]", "except AttributeError: redshift = None citation = None try: host", "host = reshost.group().split('{')[1].split('}')[0] except AttributeError: host = None if (redshift", "values of None inserted whenever it cannot resolve the value.", "% host.replace(' ','%20') ).read() resred = re.search( regex_redshift, result )", "urlopen def get_SN_info( name ): \"\"\" Queries simbad for SN", "for SN coords, redshift, and host galaxy. If redshift is", "regex_host, result ) try: cs = rescoords.group().split(':')[1].strip() ra = cs[:12].strip()", "re.search( regex_host, result ) try: cs = rescoords.group().split(':')[1].strip() ra =", "','%20') ).read() rescoords = re.search( regex_coords, result ) resred =", "its redshift. 
Returns ( (ra,dec), redshift, host_name, redshift_citation ), with", "cs[12:].strip() except: ra,dec = None,None try: redshift = float(resred.group().strip('Redshift: ').split('", "result ) resred = re.search( regex_redshift, result ) reshost =", "simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\"", "searching simbad for info about a SN and parsing the", "regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\"", "galaxy result = urlopen( simbad_uri % host.replace(' ','%20') ).read() resred", "about a SN and parsing the results. Author: <NAME>, <EMAIL>,", "whenever it cannot resolve the value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\"", "library to deal with searching simbad for info about a", "% name.replace(' ','%20') ).read() rescoords = re.search( regex_coords, result )", "A quick library to deal with searching simbad for info", "try: cs = rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec = cs[12:].strip()", "ra = cs[:12].strip() dec = cs[12:].strip() except: ra,dec = None,None", "None citation = None try: host = reshost.group().split('{')[1].split('}')[0] except AttributeError:", "cannot resolve the value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords =", "import urlopen def get_SN_info( name ): \"\"\" Queries simbad for", "SN and parsing the results. Author: <NAME>, <EMAIL>, 2014 example", "AttributeError: redshift = None citation = None try: host =", "= re.search( regex_host, result ) try: cs = rescoords.group().split(':')[1].strip() ra", "2014 example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from", "a SN and parsing the results. 
Author: <NAME>, <EMAIL>, 2014", "from urllib2 import urlopen def get_SN_info( name ): \"\"\" Queries", "it cannot resolve the value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords", "<NAME>, <EMAIL>, 2014 example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import", "simbad for info about a SN and parsing the results.", "result ) try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation =", "= None,None try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation =", "result = urlopen( simbad_uri % name.replace(' ','%20') ).read() rescoords =", "attempts to resolve link to host galaxy and report its", "result = urlopen( simbad_uri % host.replace(' ','%20') ).read() resred =", "), with values of None inserted whenever it cannot resolve", "= reshost.group().split('{')[1].split('}')[0] except AttributeError: host = None if (redshift ==", "redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split(' ')[-1] except", "redshift. Returns ( (ra,dec), redshift, host_name, redshift_citation ), with values", "\"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri % name.replace(' ','%20') ).read() rescoords", "= re.search( regex_redshift, result ) try: redshift = float(resred.group().strip('Redshift: ').split('", "urllib2 import urlopen def get_SN_info( name ): \"\"\" Queries simbad", "re.search( regex_coords, result ) resred = re.search( regex_redshift, result )", "= cs[12:].strip() except: ra,dec = None,None try: redshift = float(resred.group().strip('Redshift:", "link to host galaxy and report its redshift. Returns (", "to host galaxy and report its redshift. Returns ( (ra,dec),", "= urlopen( simbad_uri % name.replace(' ','%20') ).read() rescoords = re.search(", "redshift, and host galaxy. 
If redshift is not given for", "\"\"\" A quick library to deal with searching simbad for", "= re.search( regex_redshift, result ) reshost = re.search( regex_host, result", "http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from urllib2 import urlopen def get_SN_info(", "redshift = None citation = None try: host = reshost.group().split('{')[1].split('}')[0]", "and host galaxy. If redshift is not given for SN,", "host = None if (redshift == None) and (host !=", "result ) reshost = re.search( regex_host, result ) try: cs", "= resred.group().split(' ')[-1] except AttributeError: redshift = None citation =", "= \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri % name.replace(' ','%20') ).read()", "the redshift from the host galaxy result = urlopen( simbad_uri", "cs[:12].strip() dec = cs[12:].strip() except: ra,dec = None,None try: redshift", "the host galaxy result = urlopen( simbad_uri % host.replace(' ','%20')", "(host != None): # get the redshift from the host", ") try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split('", "value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift", "= cs[:12].strip() dec = cs[12:].strip() except: ra,dec = None,None try:", "galaxy. If redshift is not given for SN, attempts to", "re.search( regex_redshift, result ) try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0])", "name.replace(' ','%20') ).read() rescoords = re.search( regex_coords, result ) resred", "= rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec = cs[12:].strip() except: ra,dec", "= None try: host = reshost.group().split('{')[1].split('}')[0] except AttributeError: host =", "and report its redshift. Returns ( (ra,dec), redshift, host_name, redshift_citation", "for info about a SN and parsing the results. 
Author:", "# get the redshift from the host galaxy result =", "None try: host = reshost.group().split('{')[1].split('}')[0] except AttributeError: host = None", "import re from urllib2 import urlopen def get_SN_info( name ):", "to deal with searching simbad for info about a SN", "simbad_uri % host.replace(' ','%20') ).read() resred = re.search( regex_redshift, result", "None if (redshift == None) and (host != None): #", "').split(' ')[0]) citation = resred.group().split(' ')[-1] except AttributeError: pass return", "ra,dec = None,None try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0]) citation", "(ra,dec), redshift, host_name, redshift_citation ), with values of None inserted", "regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri", "cs = rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec = cs[12:].strip() except:", "( (ra,dec), redshift, host_name, redshift_citation ), with values of None", "rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec = cs[12:].strip() except: ra,dec =", "reshost.group().split('{')[1].split('}')[0] except AttributeError: host = None if (redshift == None)", "','%20') ).read() resred = re.search( regex_redshift, result ) try: redshift", "result ) try: cs = rescoords.group().split(':')[1].strip() ra = cs[:12].strip() dec", "given for SN, attempts to resolve link to host galaxy", "except: ra,dec = None,None try: redshift = float(resred.group().strip('Redshift: ').split(' ')[0])", "Returns ( (ra,dec), redshift, host_name, redshift_citation ), with values of", "re.search( regex_redshift, result ) reshost = re.search( regex_host, result )", "= None citation = None try: host = reshost.group().split('{')[1].split('}')[0] except", "and (host != None): # get the redshift from the", "except AttributeError: host = None if (redshift == None) and", "resolve link to host galaxy and report its redshift. 
Returns", ") resred = re.search( regex_redshift, result ) reshost = re.search(", ").read() resred = re.search( regex_redshift, result ) try: redshift =", "= \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result = urlopen( simbad_uri %", "resred = re.search( regex_redshift, result ) try: redshift = float(resred.group().strip('Redshift:", "name ): \"\"\" Queries simbad for SN coords, redshift, and", "re from urllib2 import urlopen def get_SN_info( name ): \"\"\"", "SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from urllib2 import", "of None inserted whenever it cannot resolve the value. \"\"\"", "= float(resred.group().strip('Redshift: ').split(' ')[0]) citation = resred.group().split(' ')[-1] except AttributeError:", "None): # get the redshift from the host galaxy result", "simbad_uri % name.replace(' ','%20') ).read() rescoords = re.search( regex_coords, result", "\"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host =", "= \"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host = \"apparent\\s+host\\s+galaxy\\s+.+?\\{(.*?)\\}\" result", "example SIMBAD uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from urllib2", "coords, redshift, and host galaxy. If redshift is not given", "= \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\): .+\" regex_redshift = \"Redshift:\\s+\\d+\\.\\d+.+\" regex_host", "host.replace(' ','%20') ).read() resred = re.search( regex_redshift, result ) try:", "None inserted whenever it cannot resolve the value. 
\"\"\" simbad_uri", "get the redshift from the host galaxy result = urlopen(", "').split(' ')[0]) citation = resred.group().split(' ')[-1] except AttributeError: redshift =", "from the host galaxy result = urlopen( simbad_uri % host.replace('", ").read() rescoords = re.search( regex_coords, result ) resred = re.search(", "Queries simbad for SN coords, redshift, and host galaxy. If", "resred = re.search( regex_redshift, result ) reshost = re.search( regex_host,", "= None if (redshift == None) and (host != None):", "== None) and (host != None): # get the redshift", "SN, attempts to resolve link to host galaxy and report", "not given for SN, attempts to resolve link to host", "resolve the value. \"\"\" simbad_uri = \"http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s\" regex_coords = \"Coordinates\\(FK5.+\\):", "report its redshift. Returns ( (ra,dec), redshift, host_name, redshift_citation ),", "!= None): # get the redshift from the host galaxy", "resred.group().split(' ')[-1] except AttributeError: pass return ((ra,dec), redshift, host, citation)", "uri query: http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S \"\"\" import re from urllib2 import urlopen", "to resolve link to host galaxy and report its redshift." ]
[ "0, 30] }, {'withName': 'C', 'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation':", "= [[0., 0., 0.]] * 10 for k in range(0,", "create a MechanicalObject, a componant holding the degree of freedom", "\")) # Create a CableConstraint object with a name. #", "mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list =", "range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'],", "9, 2): v = Vec3(direction[0], direction[1] * 17.5 * (k", "referring to the MechanicalObject's positions. # The last indice is", "self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,'", "indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping',", "a BarycentricMapping. 
A BarycentricMapping is a key element as it", "TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name = name", "if nodes is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else:", "position[k + 1] = v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca',", "self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry'])", "trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])", "a set of positions specifying # the points where the", "by. cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5 2.5 \" + \"-32.5", "-20, 0], [20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ##########################################", "'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation': [0, 10, 30] }, {'withName':", "'/cable,' self.actuator_list.append(cableL.cable) if all_cables: for i in range(0, nbCables): childname", "\" + \"-77.5 12.5 12.5 \" + \"-62.5 12.5 12.5", "self.robot.min_force = [0.] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader',", "way that follow the ones of the parent mechanical model.", "= self.robot.addChild('actuators') cable = actuators.addChild('cable') # This create a MechanicalObject,", "self.robot.addObject('MechanicalObject', src='@loader') # Gives a mass to the model self.robot.addObject('UniformMass',", "degree of freedom of our # mechanical modelling. 
In the", "# Create an empty child node to store this rendering", "'A', 'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation': [0, 10, 30] },", "cable's DoFs and the finger's ones so that movements of", "vel=vel) class Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes", "DoFs and the finger's ones so that movements of the", "poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt)", "for i in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint',", "points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## actuator_names = ''", "21) position[k] = v.rotateFromQuat(q) v = Vec3(direction[0], direction[1] * 17.5", "option 1 (we believe) # self.robot.addObject('MechanicalObject', src='@loader') # Gives a", "self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list", "self.robot.addObject('UniformMass', totalMass=0.042) # Add a TetrahedronFEMForceField componant which implement an", "= '' length1 = 10. length2 = 2. lengthTrunk =", "super(Finger, self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force = [0.] 
# Without", "self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField',", "node a rendering model made of triangles and loaded from", "+ \"-77.5 12.5 12.5 \" + \"-62.5 12.5 12.5 \"", "Quat, Vec3 from sofacontrol import measurement_models path = os.path.dirname(os.path.abspath(__file__)) class", "of the parent mechanical model. fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self,", "length1, 0.], [-length1, 0., 0.], [0., -length1, 0.], [length1, 0.,", "[20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable", "fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])", "showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add a TetrahedronFEMForceField componant which implement", "super(Trunk, self).__init__(name=name) self.nb_nodes = 709 self.gravity = [0., 0., 9810.]", "/ 2) + length1, direction[2] * 17.5 * (k /", "empty node a rendering model made of triangles and loaded", "def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name = name self.robot", "0.0] translation = [0.0, 0.0, 35] self.robot.min_force = [0, 0,", "method='large', poissonRatio=0.45, youngModulus=600) # Fix the base of the trunk", "# ########################################## # This creates a new node in the", "self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) #", 
"'cableS' + str(i) theta = 1.57 * i q =", "= None self.gravity = [0., -9810., 0.] # default self.dt", "Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1,", "+ '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option", "by adding a rendering model. # Create an empty child", "0, 45]], 'withAPullPointLocation': [10, 0, 30] } ] actuators =", "1., 0.8]) # Add a BarycentricMapping to deform rendering model", "is where the pullPoint is connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0", "BarycentricMapping to deform rendering model in way that follow the", "self.actuator_list = [] self.nb_nodes = None self.gravity = [0., -9810.,", "name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization", "rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes =", "cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in position]) cableL.addObject('CableConstraint',", "# ########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel',", "totalMass=0.075) # Add a TetrahedronFEMForceField componant which implement an elastic", "+ 27) position[k + 1] = v.rotateFromQuat(q) cableS = actuators.addChild(childname)", "cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', 
position=pullPoint[i] + [pos.toList() for pos", "# This create a MechanicalObject, a componant holding the degree", "so that movements of the cable's DoFs will be mapped", "12.5 12.5 \" + \"-62.5 12.5 12.5 \" + \"-47.5", "finger's node. actuators = self.robot.addChild('actuators') cable = actuators.addChild('cable') # This", "Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver',", "# Add a TetrahedronFEMForceField componant which implement an elastic material", "/ 2.)) position = [[0., 0., 0.]] * 10 for", "of positions specifying # the points where the cable is", "Create an empty child node to store this rendering model.", "name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping',", "-9810.] rotation = [90, 0.0, 0.0] translation = [0.0, 0.0,", "[90, 0.0, 0.0] translation = [0.0, 0.0, 35] self.robot.min_force =", "[pos.toList() for pos in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)),", "This node is appended to the finger's node. actuators =", "which implement an elastic material model solved using the Finite", "the Finite Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',", "# the indices are referring to the MechanicalObject's positions. #", "for pos in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70',", "on tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450) # Fix", "* (k / 2) + length1, direction[2] * 17.5 *", "self.actuator_list.append(cableL.cable) if all_cables: for i in range(0, nbCables): childname =", "a cable it is a set of positions specifying #", "connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] *", "= [[0., length1, 0.], [-length1, 0., 0.], [0., -length1, 0.],", "measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel)", "cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## #", "of triangles and loaded from an stl file. fingerVisu.addObject('MeshSTLLoader', filename=path", "rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628 self.gravity = [0., 0.,", "# Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\",", "[5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable", "self.actuator_list ########################################## # Visualization # ########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader',", "a name. # the indices are referring to the MechanicalObject's", "2.5 \" + \"-47.5 12.5 2.5 \" + \"-62.5 12.5", "self.nb_nodes = 1628 self.gravity = [0., 0., -9810.] 
rotation =", "else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class Trunk(TemplateEnvironment): def __init__(self,", "[0., 0., 9810.] self.robot.min_force = [0.] * 8 # Without", "name='meca', position=pullPoint[i] + [pos.toList() for pos in position]) cableS.addObject('CableConstraint', template='Vec3d',", "0] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path +", "self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## # This", "get_measurement_model(self, nodes=None, pos=True, vel=True): if nodes is None: return measurement_models.linearModel(range(self.nb_nodes),", "in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0,", "cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force',", "for i in range(0, nbCables): childname = 'cableS' + str(i)", "0, 30] } ] actuators = self.robot.addChild('actuators') for i in", "(k / 2) + 27) position[k + 1] = v.rotateFromQuat(q)", "by adding constraints in a region of interest (ROI) self.robot.addObject('BoxROI',", "ones of the parent mechanical model. 
fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def", "drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## actuator_names", "minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname", "# This create a BarycentricMapping. A BarycentricMapping is a key", "self.nb_nodes, pos=pos, vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class", "2) + 27) position[k + 1] = v.rotateFromQuat(q) cableL =", "bi-directional link # between the cable's DoFs and the finger's", "# Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk')", "\"-32.5 12.5 12.5 \" + \"-17.5 12.5 12.5 \")) #", "self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## #", "maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping',", "0, 0, 0] # Without premultiplication with dt class Finger(TemplateEnvironment):", "from splib.numerics import Quat, Vec3 from sofacontrol import measurement_models path", "range(0, 20, 2): v = Vec3(direction[0], direction[1] * 17.5 *", "\"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject',", "2) + 27) position[k + 1] 
= v.rotateFromQuat(q) cableS =", "[-10, 0, 30] }, {'withName': 'C', 'withCableGeometry': [[0, -97, 45]],", "1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables,", "passing by. cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5 2.5 \" +", "default self.dt = dt def get_measurement_model(self, nodes=None, pos=True, vel=True): if", "35] self.robot.min_force = [0, 0, 0, 0] # Without premultiplication", "with a name. # the indices are referring to the", "12.5 6.5 \" + \"-85.5 12.5 8.5 \" + \"-83.5", "Add to this empty node a rendering model made of", "\" + \"-62.5 12.5 2.5 \" + \"-77.5 12.5 2.5", "rendering model in way that follow the ones of the", "a rendering model. # Create an empty child node to", "# ########################################## self.actuatorsParam = [ {'withName': 'A', 'withCableGeometry': [[0, 97,", "Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450) #", "method='large', poissonRatio=0.45, youngModulus=450) # Fix the base of the trunk", "of our # mechanical modelling. In the case of a", "the parent mechanical model. fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self, name='Diamond',", "name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force = [0.] #", "None self.gravity = [0., -9810., 0.] 
# default self.dt =", "solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes = None self.gravity = [0.,", "to option 1 (we believe) # self.robot.addObject('MechanicalObject', src='@loader') # Gives", "super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628 self.gravity =", "with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer',", "totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix", "2) + 21) position[k] = v.rotateFromQuat(q) v = Vec3(direction[0], direction[1]", "/ 2) + 27) position[k + 1] = v.rotateFromQuat(q) cableS", "node in the scene. This node is appended to the", "[0, -10, 30] }, {'withName': 'D', 'withCableGeometry': [[97, 0, 45]],", "child node to store this rendering model. fingerVisu = self.robot.addChild('VisualModel')", "= actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in", "box=[-15, -15, -40, 15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12')", "the MechanicalObject's positions. # The last indice is where the", "premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader',", "length1 = 10. length2 = 2. lengthTrunk = 195. pullPoint", "A BarycentricMapping is a key element as it will create", "k in range(0, 20, 2): v = Vec3(direction[0], direction[1] *", "visualization is handled by adding a rendering model. # Create", "the Finite # Element Method on tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM',", "nbCables = 4 actuators = self.robot.addChild('actuators') for i in range(0,", "model. fingerVisu = self.robot.addChild('VisualModel') # Add to this empty node", "\"-77.5 12.5 12.5 \" + \"-62.5 12.5 12.5 \" +", "########################################## self.actuatorsParam = [ {'withName': 'A', 'withCableGeometry': [[0, 97, 45]],", "# Without premultiplication with dt class Finger(TemplateEnvironment): def __init__(self, name='Finger'):", "cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] *", "maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')", "[[-97, 0, 45]], 'withAPullPointLocation': [-10, 0, 30] }, {'withName': 'C',", "30] }, {'withName': 'D', 'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation': [10,", "12.5 12.5 \" + \"-17.5 12.5 12.5 \")) # Create", "this rendering model. fingerVisu = self.robot.addChild('VisualModel') # Add to this", "'' length1 = 10. length2 = 2. lengthTrunk = 195.", "actuator_names = '' length1 = 10. length2 = 2. 
lengthTrunk", "self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes = None", "self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## diamondVisu", "in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20,", "1] = v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] +", "with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')", "self.nb_nodes = None self.gravity = [0., -9810., 0.] # default", "rendering model. # Create an empty child node to store", "direction.normalize() nbCables = 4 actuators = self.robot.addChild('actuators') for i in", "template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large',", "= 195. pullPoint = [[0., length1, 0.], [-length1, 0., 0.],", "handled by adding a rendering model. 
# Create an empty", "length2 - length1, lengthTrunk) direction.normalize() nbCables = 4 actuators =", "+ '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras',", "drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## #", "model. # Create an empty child node to store this", "name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force = [0, 0, 0, 0]", "are referring to the MechanicalObject's positions. # The last indice", "+ \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7], updateNormals=False) diamondVisu.addObject('BarycentricMapping')", "= 'cableL' + str(i) theta = 1.57 * i q", "1 (we believe) # self.robot.addObject('MechanicalObject', src='@loader') # Gives a mass", "= [0.] * 8 # Without premultiplication with dt self.robot.addObject('MeshVTKLoader',", "DoFs will be mapped # to the finger and vice-versa;", "self.actuatorsParam = [ {'withName': 'A', 'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation':", "cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i +", "2.5 \" + \"-32.5 12.5 2.5 \" + \"-47.5 12.5", "* i q = Quat(0., 0., sin(theta / 2.), cos(theta", "the indices are referring to the MechanicalObject's positions. 
# The", "* 17.5 * (k / 2) + 27) position[k +", "\" + \"-62.5 12.5 12.5 \" + \"-47.5 12.5 12.5", "# ########################################## # In Sofa, visualization is handled by adding", "childname + '/cable,' self.actuator_list.append(cableL.cable) if all_cables: for i in range(0,", "\" + \"-85.5 12.5 6.5 \" + \"-85.5 12.5 8.5", "name. # the indices are referring to the MechanicalObject's positions.", "self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5')", "an elastic material model solved using the Finite Element Method", "\" + \"-77.5 12.5 2.5 \" + \"-83.5 12.5 4.5", "10.5 \" + \"-77.5 12.5 12.5 \" + \"-62.5 12.5", "youngModulus=600) # Fix the base of the trunk by adding", "########################################## # Visualization # ########################################## # In Sofa, visualization is", "length1, lengthTrunk) direction.normalize() nbCables = 4 actuators = self.robot.addChild('actuators') for", "self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## trunkVisu =", "name='meca', position=( \"-17.5 12.5 2.5 \" + \"-32.5 12.5 2.5", "= self.actuator_list ########################################## # Visualization # ########################################## diamondVisu = self.robot.addChild('VisualModel')", "= 1.57 * i q = Quat(0., 0., sin(theta /", "# Visualization # ########################################## # In Sofa, visualization is handled", "# Visualization # ########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path +", "name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield',", "componant which implement an elastic material model solved using the", "def get_measurement_model(self, nodes=None, pos=True, vel=True): if nodes is None: return", "trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def", "= v.rotateFromQuat(q) v = Vec3(direction[0], direction[1] * 17.5 * (k", "Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450)", "finger's ones so that movements of the cable's DoFs will", "template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4]", "rayleighStiffness=0.1, dt=0.01): self.name = name self.robot = Sofa.Core.Node(name) # set-up", "import Sofa.Core from splib.numerics import Quat, Vec3 from sofacontrol import", "self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## actuator_names =", "mechanical model. 
fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45,", "\"-85.5 12.5 6.5 \" + \"-85.5 12.5 8.5 \" +", "+ \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) # Add", "self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')", "measurement_models path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1,", "* self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname +", "dt class Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes =", "new node in the scene. This node is appended to", "a componant holding the degree of freedom of our #", "name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>',", "This create a MechanicalObject, a componant holding the degree of", ") cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ##########################################", "mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ##########################################", "1628 self.gravity = [0., 0., -9810.] rotation = [90, 0.0,", "'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation': [-10, 0, 30] }, {'withName':", "the points where the cable is passing by. 
cable.addObject('MechanicalObject', name='meca',", "10, 30] }, {'withName': 'B', 'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation':", "__init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force = [0.]", "# Add to this empty node a rendering model made", "to deform rendering model in way that follow the ones", "self.robot = Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass),", "+ [pos.toList() for pos in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\",", "position[k + 1] = v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca',", "for k in range(0, 20, 2): v = Vec3(direction[0], direction[1]", "= actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True,", "+ str(i) theta = 1.57 * i q = Quat(0.,", "vel=True): if nodes is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel)", "the degree of freedom of our # mechanical modelling. 
In", "template='Vec3d', color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self,", "poissonRatio=0.45, youngModulus=600) # Fix the base of the trunk by", "region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5,", "ones so that movements of the cable's DoFs will be", "cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] *", "pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) # This create", "our # mechanical modelling. In the case of a cable", "dt def get_measurement_model(self, nodes=None, pos=True, vel=True): if nodes is None:", "actuator_names += childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ##########################################", "0.], [0., -length1, 0.], [length1, 0., 0.]] direction = Vec3(0.,", "# mechanical modelling. 
In the case of a cable it", "[length1, 0., 0.]] direction = Vec3(0., length2 - length1, lengthTrunk)", "0., 0.]] direction = Vec3(0., length2 - length1, lengthTrunk) direction.normalize()", "name='meca', position=pullPoint[i] + [pos.toList() for pos in position]) cableL.addObject('CableConstraint', template='Vec3d',", "a bi-directional link # between the cable's DoFs and the", "-length1, 0.], [length1, 0., 0.]] direction = Vec3(0., length2 -", "* 8 # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path", "maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names", "implement an elastic material model solved using the Finite Element", "Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600)", "stiffness='1e12') ########################################## # Cable # ########################################## # This creates a", "self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',", "{'withName': 'C', 'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation': [0, -10, 30]", "self.robot.dt.value) # This create a BarycentricMapping. A BarycentricMapping is a", "elastic material model solved using the Finite # Element Method", "creates a new node in the scene. 
This node is", "name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes = 709 self.gravity = [0.,", "dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier')", "premultiplication with dt class Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger, self).__init__(name=name)", "-97, 45]], 'withAPullPointLocation': [0, -10, 30] }, {'withName': 'D', 'withCableGeometry':", "Cable # ########################################## self.actuatorsParam = [ {'withName': 'A', 'withCableGeometry': [[0,", "name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list =", "a rendering model made of triangles and loaded from an", "model solved using the Finite Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField',", "valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) # This create a BarycentricMapping. A", "(k / 2) + 21) position[k] = v.rotateFromQuat(q) v =", "0, 0] # Without premultiplication with dt class Finger(TemplateEnvironment): def", "Method on tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600) #", "pos=pos, vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class Trunk(TemplateEnvironment):", "\" + \"-47.5 12.5 12.5 \" + \"-32.5 12.5 12.5", "import sin import Sofa.Core from splib.numerics import Quat, Vec3 from", "cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableL.cable)", "+ \"-83.5 12.5 10.5 \" + \"-77.5 12.5 12.5 \"", "will be mapped # to the finger and vice-versa; cable.addObject('BarycentricMapping',", "Add a TetrahedronFEMForceField componant which implement an elastic material model", "i q = Quat(0., 0., sin(theta / 2.), cos(theta /", "name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes = None self.gravity", "17.5 * (k / 2) + 21) position[k] = v.rotateFromQuat(q)", "filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') #", "12.5 12.5 \" + \"-32.5 12.5 12.5 \" + \"-17.5", "poissonRatio=0.45, youngModulus=450) # Fix the base of the trunk by", "fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1,", "name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms')", "195. 
pullPoint = [[0., length1, 0.], [-length1, 0., 0.], [0.,", "# default self.dt = dt def get_measurement_model(self, nodes=None, pos=True, vel=True):", "to the MechanicalObject's positions. # The last indice is where", "'withAPullPointLocation': [0, -10, 30] }, {'withName': 'D', 'withCableGeometry': [[97, 0,", "cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value )", "cos(theta / 2.)) position = [[0., 0., 0.]] * 20", "# ########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel',", "actuators = self.robot.addChild('actuators') for i in range(0, nbCables): childname =", "solved using the Finite Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',", "2.5 \" + \"-62.5 12.5 2.5 \" + \"-77.5 12.5", "10 for k in range(0, 9, 2): v = Vec3(direction[0],", "[0., 0., -9810.] 
rotation = [90, 0.0, 0.0] translation =", "in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force',", "import measurement_models path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self, name='Template',", "12.5 12.5 \" + \"-47.5 12.5 12.5 \" + \"-32.5", "= self.actuator_list ########################################## # Visualization # ########################################## trunkVisu = self.robot.addChild('VisualModel')", "self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms')", "triangles and loaded from an stl file. fingerVisu.addObject('MeshSTLLoader', filename=path +", "= dt def get_measurement_model(self, nodes=None, pos=True, vel=True): if nodes is", "2: Equivalent to option 1 (we believe) # self.robot.addObject('MechanicalObject', src='@loader')", "the case of a cable it is a set of", "name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list", "\"-77.5 12.5 2.5 \" + \"-83.5 12.5 4.5 \" +", "+ \"-32.5 12.5 12.5 \" + \"-17.5 12.5 12.5 \"))", "4 actuators = self.robot.addChild('actuators') for i in range(0, nbCables): childname", "= name self.robot = Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver',", "actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in position])", "showIndices='false', showIndicesScale='4e-5') # Option 2: Equivalent to option 1 (we", "valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) 
cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names +=", "self.nb_nodes, pos=pos, vel=vel) class Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True): super(Trunk,", "trunk by adding constraints in a region of interest (ROI)", "17.5 * (k / 2) + length1, direction[2] * 17.5", "(k / 2) + length1, direction[2] * 17.5 * (k", "the cable is passing by. cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5", "= v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList()", "# Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk')", "import Quat, Vec3 from sofacontrol import measurement_models path = os.path.dirname(os.path.abspath(__file__))", "# Add a BarycentricMapping to deform rendering model in way", "2. lengthTrunk = 195. pullPoint = [[0., length1, 0.], [-length1,", "totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness,", "direction[1] * 17.5 * (k / 2) + length1, direction[2]", "pos=True, vel=True): if nodes is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos,", "and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list", "hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4] * self.robot.dt.value)", "i in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) 
cable.addObject('CableConstraint', name='cable',", "the finger's ones so that movements of the cable's DoFs", "12.5 \")) # Create a CableConstraint object with a name.", "# This creates a new node in the scene. This", "adding a rendering model. # Create an empty child node", "0], [20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## #", "where the pullPoint is connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5", "cable's DoFs will be mapped # to the finger and", "lengthTrunk = 195. pullPoint = [[0., length1, 0.], [-length1, 0.,", "= Vec3(direction[0], direction[1] * 17.5 * (k / 2) +", "= 709 self.gravity = [0., 0., 9810.] self.robot.min_force = [0.]", "in range(0, 9, 2): v = Vec3(direction[0], direction[1] * 17.5", "0, 0] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path", "None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes,", "if all_cables: for i in range(0, nbCables): childname = 'cableS'", "rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes", "20 for k in range(0, 20, 2): v = Vec3(direction[0],", "position=pullPoint[i] + [pos.toList() for pos in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\",", "* self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list =", "def __init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes = 709 
self.gravity", "mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableL.cable) if all_cables: for", "0., 9810.] self.robot.min_force = [0.] * 8 # Without premultiplication", "0., 0.]] * 10 for k in range(0, 9, 2):", "cos from math import sin import Sofa.Core from splib.numerics import", "0.] # default self.dt = dt def get_measurement_model(self, nodes=None, pos=True,", "v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for", "= [[0., 0., 0.]] * 20 for k in range(0,", "1., 1., 0.8]) # Add a BarycentricMapping to deform rendering", "in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15,", "/ 2) + 21) position[k] = v.rotateFromQuat(q) v = Vec3(direction[0],", "0., -9810.] rotation = [90, 0.0, 0.0] translation = [0.0,", "+= childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ########################################## #", "v.rotateFromQuat(q) v = Vec3(direction[0], direction[1] * 17.5 * (k /", "0., 0.]] * 20 for k in range(0, 20, 2):", "########################################## # Cable # ########################################## self.actuatorsParam = [ {'withName': 'A',", "\"-83.5 12.5 10.5 \" + \"-77.5 12.5 12.5 \" +", "name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the", "material model solved using the Finite # Element Method on", "\"-83.5 12.5 4.5 \" + \"-85.5 12.5 6.5 \" +", "Option 2: Equivalent to option 1 (we believe) # self.robot.addObject('MechanicalObject',", "[-length1, 0., 0.], [0., -length1, 0.], [length1, 0., 0.]] direction", "mechanical modelling. 
In the case of a cable it is", "childname = 'cableS' + str(i) theta = 1.57 * i", "# Cable # ########################################## # This creates a new node", "trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force", "in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force',", "name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms')", "= 4 actuators = self.robot.addChild('actuators') for i in range(0, nbCables):", "using the Finite Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM',", "[0, 0, 0, 0] # Without premultiplication with dt class", "in way that follow the ones of the parent mechanical", "region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20,", "Sofa.Core from splib.numerics import Quat, Vec3 from sofacontrol import measurement_models", "cable = actuators.addChild('cable') # This create a MechanicalObject, a componant", "= os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):", "adding constraints in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI',", "self.robot.addChild('actuators') for i in range(0, nbCables): childname = 'cableL' +", "12.5 2.5 \" + \"-77.5 12.5 2.5 \" + \"-83.5", "filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) #", "a region of 
interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0],", "{'withName': 'B', 'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation': [-10, 0, 30]", "Fix the base of the trunk by adding constraints in", "= v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList()", "2.)) position = [[0., 0., 0.]] * 10 for k", "import os from math import cos from math import sin", "showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add a TetrahedronFEMForceField componant which", "* 17.5 * (k / 2) + 21) position[k] =", "stl file. fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1.,", "name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option 2: Equivalent to option", "'cableL' + str(i) theta = 1.57 * i q =", "mapped # to the finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false',", "0.8]) # Add a BarycentricMapping to deform rendering model in", "BarycentricMapping is a key element as it will create a", "constraints in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15,", "__init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name = name self.robot =", "create a bi-directional link # between the cable's DoFs and", "name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass',", "holding the degree of freedom of our # mechanical modelling.", "cable is passing by. 
cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5 2.5", "[[97, 0, 45]], 'withAPullPointLocation': [10, 0, 30] } ] actuators", "# ########################################## actuator_names = '' length1 = 10. length2 =", "\"-47.5 12.5 12.5 \" + \"-32.5 12.5 12.5 \" +", "position=( \"-17.5 12.5 2.5 \" + \"-32.5 12.5 2.5 \"", "v = Vec3(direction[0], direction[1] * 17.5 * (k / 2)", "2.5 \" + \"-77.5 12.5 2.5 \" + \"-83.5 12.5", "\"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk):", "elastic material model solved using the Finite Element Method on", "using the Finite # Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',", "] actuators = self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)): cable =", "rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628", "'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation': [0, -10, 30] }, {'withName':", "= 2. lengthTrunk = 195. pullPoint = [[0., length1, 0.],", "30] }, {'withName': 'B', 'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation': [-10,", "# Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45,", "12.5 12.5 \")) # Create a CableConstraint object with a", "self.dt = dt def get_measurement_model(self, nodes=None, pos=True, vel=True): if nodes", "MechanicalObject, a componant holding the degree of freedom of our", "}, {'withName': 'D', 'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation': [10, 0,", "[0.] * 8 # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader',", "length2 = 2. lengthTrunk = 195. 
pullPoint = [[0., length1,", "Without premultiplication with dt class Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger,", "+ \"-85.5 12.5 8.5 \" + \"-83.5 12.5 10.5 \"", "self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms')", "+ \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms')", "minForce=self.robot.min_force[i + 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names", "node. actuators = self.robot.addChild('actuators') cable = actuators.addChild('cable') # This create", "12.5 \" + \"-47.5 12.5 12.5 \" + \"-32.5 12.5", "youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes", "Visualization # ########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\")", "element as it will create a bi-directional link # between", "nodes is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else: return", "12.5 \" + \"-32.5 12.5 12.5 \" + \"-17.5 12.5", "0, 45]], 'withAPullPointLocation': [-10, 0, 30] }, {'withName': 'C', 'withCableGeometry':", "0.]] * 10 for k in range(0, 9, 2): v", "return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class 
Trunk(TemplateEnvironment): def __init__(self, name='Trunk',", "* 20 for k in range(0, 20, 2): v =", "+ \"-77.5 12.5 2.5 \" + \"-83.5 12.5 4.5 \"", "rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name = name self.robot = Sofa.Core.Node(name) #", "+ \"-62.5 12.5 12.5 \" + \"-47.5 12.5 12.5 \"", "\"-17.5 12.5 12.5 \")) # Create a CableConstraint object with", "deform rendering model in way that follow the ones of", "Vec3 from sofacontrol import measurement_models path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment:", "= Quat(0., 0., sin(theta / 2.), cos(theta / 2.)) position", "cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value)", "(ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True)", "it is a set of positions specifying # the points", "+ \"-17.5 12.5 12.5 \")) # Create a CableConstraint object", "template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600) # Fix the base of", "premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader',", "case of a cable it is a set of positions", "cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos", "[ {'withName': 'A', 'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation': [0, 10,", "a key element as it will create a bi-directional link", "'/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d',", 
"[10, 0, 30] } ] actuators = self.robot.addChild('actuators') for i", "self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass')", "+ \"-85.5 12.5 6.5 \" + \"-85.5 12.5 8.5 \"", "Quat(0., 0., sin(theta / 2.), cos(theta / 2.)) position =", "\"-85.5 12.5 8.5 \" + \"-83.5 12.5 10.5 \" +", "position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i", "of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20,", "# Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option", "30] } ] actuators = self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)):", "nodes=None, pos=True, vel=True): if nodes is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes,", "hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable)", "+ length1, direction[2] * 17.5 * (k / 2) +", "indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\",", "BarycentricMapping. A BarycentricMapping is a key element as it will", "tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450) # Fix the", "the scene. This node is appended to the finger's node.", "sin(theta / 2.), cos(theta / 2.)) position = [[0., 0.,", "# Gives a mass to the model self.robot.addObject('UniformMass', totalMass=0.042) #", "and loaded from an stl file. fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\")", "= self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7,", "this empty node a rendering model made of triangles and", "0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False)", "math import sin import Sofa.Core from splib.numerics import Quat, Vec3", "= 1628 self.gravity = [0., 0., -9810.] rotation = [90,", "class Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes =", "range(0, nbCables): childname = 'cableS' + str(i) theta = 1.57", "########################################## actuator_names = '' length1 = 10. 
length2 = 2.", "self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with", "class Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1,", "+ 1] = v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i]", "- length1, lengthTrunk) direction.normalize() nbCables = 4 actuators = self.robot.addChild('actuators')", "# In Sofa, visualization is handled by adding a rendering", "def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond,", "hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping',", "########################################## # Cable # ########################################## # This creates a new", "########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d',", "= self.robot.addChild('VisualModel') # Add to this empty node a rendering", "box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')", "position[k] = v.rotateFromQuat(q) v = Vec3(direction[0], direction[1] * 17.5 *", "2) + length1, direction[2] * 17.5 * (k / 2)", "cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5 2.5 \" + \"-32.5 12.5", "return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos,", "10. length2 = 2. lengthTrunk = 195. pullPoint = [[0.,", "on tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600) # Fix", "\" + \"-32.5 12.5 12.5 \" + \"-17.5 12.5 12.5", "Finite Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45,", "direction[2] * 17.5 * (k / 2) + 21) position[k]", "node to store this rendering model. fingerVisu = self.robot.addChild('VisualModel') #", "cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## #", "name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping',", "709 self.gravity = [0., 0., 9810.] self.robot.min_force = [0.] 
*", "+ \"-32.5 12.5 2.5 \" + \"-47.5 12.5 2.5 \"", "all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes = 709 self.gravity = [0., 0.,", "1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option 2: Equivalent", "dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier')", "filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject',", "set of positions specifying # the points where the cable", "to the finger's node. actuators = self.robot.addChild('actuators') cable = actuators.addChild('cable')", "that movements of the cable's DoFs will be mapped #", "self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add a", "########################################## # In Sofa, visualization is handled by adding a", "cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in position]) cableS.addObject('CableConstraint',", "points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ########################################## # This creates", "implement an elastic material model solved using the Finite #", "is a set of positions specifying # the points where", "# between the cable's DoFs and the finger's ones so", "length1, direction[2] * 17.5 * (k / 2) + 21)", "__init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, 
rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name,", "self).__init__(name=name, all_cables=False) self.robot.min_force = [0, 0, 0, 0] # Without", "[0.] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path +", "\" + \"-32.5 12.5 2.5 \" + \"-47.5 12.5 2.5", "a CableConstraint object with a name. # the indices are", "the cable's DoFs and the finger's ones so that movements", "fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) # Add a BarycentricMapping", "= [ {'withName': 'A', 'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation': [0,", "with dt class Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes", "self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list", "Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force", "the finger's node. actuators = self.robot.addChild('actuators') cable = actuators.addChild('cable') #", "0.0, 35] self.robot.min_force = [0, 0, 0, 0] # Without", "= 10. length2 = 2. lengthTrunk = 195. 
pullPoint =", "src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5')", "name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4] *", "self.name = name self.robot = Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver',", "self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450) # Fix the base", "q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))", "rendering model. fingerVisu = self.robot.addChild('VisualModel') # Add to this empty", "Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk') self.robot.addObject('TetrahedronSetTopologyContainer',", "0] # Without premultiplication with dt class Finger(TemplateEnvironment): def __init__(self,", "'withAPullPointLocation': [0, 10, 30] }, {'withName': 'B', 'withCableGeometry': [[-97, 0,", "self.actuator_list ########################################## # Visualization # ########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader',", "MechanicalObject's positions. # The last indice is where the pullPoint", "for i in range(0, nbCables): childname = 'cableL' + str(i)", "movements of the cable's DoFs will be mapped # to", "dt=dt) self.nb_nodes = 1628 self.gravity = [0., 0., -9810.] 
rotation", "translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras',", "2.5 \" + \"-83.5 12.5 4.5 \" + \"-85.5 12.5", "self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1.,", "valueType='force', minForce=self.robot.min_force[i + 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')", "The last indice is where the pullPoint is connected. cable.addObject('CableConstraint',", "pos in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\",", "with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')", "########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d',", "will create a bi-directional link # between the cable's DoFs", "[[0, -97, 45]], 'withAPullPointLocation': [0, -10, 30] }, {'withName': 'D',", "cos(theta / 2.)) position = [[0., 0., 0.]] * 10", "indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value) cableL.addObject('BarycentricMapping', name='mapping', mapForces='false',", "in range(0, nbCables): childname = 'cableL' + str(i) theta =", "filename=path + \"/mesh/trunk.stl\") 
trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping')", "self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628 self.gravity = [0.,", "self.nb_nodes = 709 self.gravity = [0., 0., 9810.] self.robot.min_force =", "(we believe) # self.robot.addObject('MechanicalObject', src='@loader') # Gives a mass to", "name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the base of the trunk", "self.robot.min_force = [0.] * 8 # Without premultiplication with dt", "'/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ##########################################", "10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable # ##########################################", "}, {'withName': 'B', 'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation': [-10, 0,", "\" + \"-85.5 12.5 8.5 \" + \"-83.5 12.5 10.5", "+ '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization #", "to store this rendering model. 
fingerVisu = self.robot.addChild('VisualModel') # Add", "translation = [0.0, 0.0, 35] self.robot.min_force = [0, 0, 0,", "17.5 * (k / 2) + 27) position[k + 1]", "constraints in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20,", "range(0, 9, 2): v = Vec3(direction[0], direction[1] * 17.5 *", "20, 2): v = Vec3(direction[0], direction[1] * 17.5 * (k", "mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization #", "template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option 2: Equivalent to option 1", "an elastic material model solved using the Finite # Element", "[pos.toList() for pos in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)),", "model in way that follow the ones of the parent", "i in range(0, nbCables): childname = 'cableL' + str(i) theta", "mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list", "drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable # ########################################## self.actuatorsParam", "CableConstraint object with a name. # the indices are referring", "1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name,", "Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer',", "appended to the finger's node. 
actuators = self.robot.addChild('actuators') cable =", "a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40,", "template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=450) # Fix the base of", "from an stl file. fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d',", "model made of triangles and loaded from an stl file.", "# set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner')", "= Vec3(0., length2 - length1, lengthTrunk) direction.normalize() nbCables = 4", "cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableS.cable)", "last indice is where the pullPoint is connected. cable.addObject('CableConstraint', name=\"cable\",", "follow the ones of the parent mechanical model. fingerVisu.addObject('BarycentricMapping') class", "27) position[k + 1] = v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject',", "rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d',", "In the case of a cable it is a set", "self).__init__(name=name) self.nb_nodes = 709 self.gravity = [0., 0., 9810.] 
self.robot.min_force", "src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5')", "self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600) # Fix the base", "+ \"-47.5 12.5 12.5 \" + \"-32.5 12.5 12.5 \"", "v.rotateFromQuat(q) cableL = actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for", "+ 1] = v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i]", "tetrahedrons. self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large', poissonRatio=0.45, youngModulus=600) # Fix the", "vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ##########################################", "poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the base of the trunk by", "Visualization # ########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\")", "= actuators.addChild('cable') # This create a MechanicalObject, a componant holding", "actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i]", "os 
from math import cos from math import sin import", "template='Vec3d', color=[1., 1., 1., 0.8]) # Add a BarycentricMapping to", "4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname", "measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True):", "model self.robot.addObject('UniformMass', totalMass=0.042) # Add a TetrahedronFEMForceField componant which implement", "childname + '/cable,' self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization", "class Finger(TemplateEnvironment): def __init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes = 158", "self.gravity = [0., -9810., 0.] # default self.dt = dt", "Equivalent to option 1 (we believe) # self.robot.addObject('MechanicalObject', src='@loader') #", "stiffness='1e12') ########################################## # Cable # ########################################## self.actuatorsParam = [ {'withName':", "########################################## # Visualization # ########################################## diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path", "self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField',", "\"-47.5 12.5 2.5 \" + \"-62.5 12.5 2.5 \" +", "12.5 10.5 \" + \"-77.5 12.5 12.5 \" + \"-62.5", "0.]] * 20 for k in range(0, 20, 2): v", "of the cable's DoFs will be mapped # to the", "[0., -9810., 0.] # default self.dt = dt def get_measurement_model(self,", "self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force = [0.] 
# Without premultiplication", "a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0],", "freedom of our # mechanical modelling. In the case of", "name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d',", "8 # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path +", "import cos from math import sin import Sofa.Core from splib.numerics", "0.]] direction = Vec3(0., length2 - length1, lengthTrunk) direction.normalize() nbCables", "name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass,", "mass to the model self.robot.addObject('UniformMass', totalMass=0.042) # Add a TetrahedronFEMForceField", "Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):", "an empty child node to store this rendering model. fingerVisu", "youngModulus=youngModulus) # Fix the base of the trunk by adding", "= self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject',", "= 'cableS' + str(i) theta = 1.57 * i q", "minForce=self.robot.min_force[0] * self.robot.dt.value) # This create a BarycentricMapping. A BarycentricMapping", "Cable # ########################################## actuator_names = '' length1 = 10. 
length2", "2): v = Vec3(direction[0], direction[1] * 17.5 * (k /", "self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',", "math import cos from math import sin import Sofa.Core from", "[] self.nb_nodes = None self.gravity = [0., -9810., 0.] #", "color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'):", "(ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False)", "length1, direction[2] * 17.5 * (k / 2) + 27)", "os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name", "# Cable # ########################################## self.actuatorsParam = [ {'withName': 'A', 'withCableGeometry':", "\"-32.5 12.5 2.5 \" + \"-47.5 12.5 2.5 \" +", "6.5 \" + \"-85.5 12.5 8.5 \" + \"-83.5 12.5", "is a key element as it will create a bi-directional", "model solved using the Finite # Element Method on tetrahedrons.", "from math import cos from math import sin import Sofa.Core", "of freedom of our # mechanical modelling. In the case", "This creates a new node in the scene. 
This node", "self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',", "minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list", "template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i] * self.robot.dt.value)", "self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## # In", "material model solved using the Finite Element Method on tetrahedrons.", "file. fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1.,", "rendering model made of triangles and loaded from an stl", "self.nb_nodes = 158 self.robot.min_force = [0.] # Without premultiplication with", "name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name = name self.robot = Sofa.Core.Node(name)", "########################################## # Visualization # ########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path", "= [] self.nb_nodes = None self.gravity = [0., -9810., 0.]", "sofacontrol import measurement_models path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self,", "to the model self.robot.addObject('UniformMass', totalMass=0.042) # Add a TetrahedronFEMForceField componant", "Finite # Element Method on tetrahedrons. 
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',", "[[0., 0., 0.]] * 20 for k in range(0, 20,", "\"-62.5 12.5 2.5 \" + \"-77.5 12.5 2.5 \" +", "{'withName': 'D', 'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation': [10, 0, 30]", "from sofacontrol import measurement_models path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def", "the trunk by adding constraints in a region of interest", "is connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0]", "/ 2.)) position = [[0., 0., 0.]] * 20 for", "\"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) # Add a", "'C', 'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation': [0, -10, 30] },", "interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]],", "12.5 4.5 \" + \"-85.5 12.5 6.5 \" + \"-85.5", "name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier')", "template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the base of", "self.robot.addObject('UniformMass', totalMass=0.075) # Add a TetrahedronFEMForceField componant which implement an", "2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) # This create a BarycentricMapping.", "= self.actuator_list ########################################## # Visualization # ########################################## # In Sofa,", "to this empty node a rendering model made of triangles", "a new node in the scene. 
This node is appended", "Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option 2:", "+ 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names +=", "position = [[0., 0., 0.]] * 10 for k in", "\"-17.5 12.5 2.5 \" + \"-32.5 12.5 2.5 \" +", "direction[2] * 17.5 * (k / 2) + 27) position[k", "# Option 2: Equivalent to option 1 (we believe) #", "12.5 2.5 \" + \"-32.5 12.5 2.5 \" + \"-47.5", "model. fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450,", "# the points where the cable is passing by. cable.addObject('MechanicalObject',", "########################################## # Cable # ########################################## actuator_names = '' length1 =", "showIndicesScale='4e-5') # Option 2: Equivalent to option 1 (we believe)", "for pos in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40',", "showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio,", "name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices',", "self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = [] self.nb_nodes = None self.gravity =", "# Create a CableConstraint object with a name. 
# the", "30] }, {'withName': 'C', 'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation': [0,", "actuators = self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName'])", "the pullPoint is connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\",", "scene. This node is appended to the finger's node. actuators", "pullPoint is connected. cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force',", "method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the base of the", "* 17.5 * (k / 2) + length1, direction[2] *", "is None: return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel) else: return measurement_models.linearModel(nodes,", "vel=vel) else: return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel) class Trunk(TemplateEnvironment): def", "template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add a TetrahedronFEMForceField componant", "is appended to the finger's node. actuators = self.robot.addChild('actuators') cable", "9810.] self.robot.min_force = [0.] * 8 # Without premultiplication with", "dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader',", "modelling. 
In the case of a cable it is a", "15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable", "0.], [length1, 0., 0.]] direction = Vec3(0., length2 - length1,", "mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization #", "name='FEM', method='large', poissonRatio=0.45, youngModulus=600) # Fix the base of the", "color=[1., 1., 1., 0.8]) # Add a BarycentricMapping to deform", "45]], 'withAPullPointLocation': [10, 0, 30] } ] actuators = self.robot.addChild('actuators')", "constraints in a region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15,", "solved using the Finite # Element Method on tetrahedrons. self.robot.addObject('TetrahedronFEMForceField',", "maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i + 4] * self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false',", "made of triangles and loaded from an stl file. fingerVisu.addObject('MeshSTLLoader',", "the ones of the parent mechanical model. 
fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment):", "direction = Vec3(0., length2 - length1, lengthTrunk) direction.normalize() nbCables =", "* (k / 2) + 27) position[k + 1] =", "position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))), pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value", "childname = 'cableL' + str(i) theta = 1.57 * i", "showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass, name='mass') self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus)", "is handled by adding a rendering model. # Create an", "k in range(0, 9, 2): v = Vec3(direction[0], direction[1] *", "-9810., 0.] # default self.dt = dt def get_measurement_model(self, nodes=None,", "parent mechanical model. fingerVisu.addObject('BarycentricMapping') class Diamond(TemplateEnvironment): def __init__(self, name='Diamond', totalMass=0.5,", "self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false',", "create a BarycentricMapping. 
A BarycentricMapping is a key element as", "# The last indice is where the pullPoint is connected.", "20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ##########################################", "actuator_names += childname + '/cable,' self.actuator_list.append(cableL.cable) if all_cables: for i", "* self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname +", "key element as it will create a bi-directional link #", "= [0.] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path", "\" + \"-83.5 12.5 4.5 \" + \"-85.5 12.5 6.5", "actuators = self.robot.addChild('actuators') cable = actuators.addChild('cable') # This create a", "the finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) self.robot.actuator_list", "in range(0, 20, 2): v = Vec3(direction[0], direction[1] * 17.5", "self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject',", "nbCables): childname = 'cableS' + str(i) theta = 1.57 *", "8.5 \" + \"-83.5 12.5 10.5 \" + \"-77.5 12.5", "Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force = [0,", "12.5 2.5 \" + \"-47.5 12.5 2.5 \" + \"-62.5", "'withAPullPointLocation': [-10, 0, 30] }, {'withName': 'C', 'withCableGeometry': [[0, -97,", "= 158 self.robot.min_force = [0.] 
# Without premultiplication with dt", "firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\") self.actuator_list = []", "158 self.robot.min_force = [0.] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader',", "in the scene. This node is appended to the finger's", "self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) #", "Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes = 709", "2.), cos(theta / 2.)) position = [[0., 0., 0.]] *", "a MechanicalObject, a componant holding the degree of freedom of", "interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10],", "self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable # ########################################## self.actuatorsParam =", "+ 21) position[k] = v.rotateFromQuat(q) v = Vec3(direction[0], direction[1] *", "mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ##########################################", "pos=pos, vel=vel) class Trunk(TemplateEnvironment): def __init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name)", "all_cables: for i in range(0, nbCables): childname = 'cableS' +", "rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) 
self.nb_nodes =", "self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField',", "+ 27) position[k + 1] = v.rotateFromQuat(q) cableL = actuators.addChild(childname)", "rotation = [90, 0.0, 0.0] translation = [0.0, 0.0, 35]", "* self.robot.dt.value) # This create a BarycentricMapping. A BarycentricMapping is", "store this rendering model. fingerVisu = self.robot.addChild('VisualModel') # Add to", "= [90, 0.0, 0.0] translation = [0.0, 0.0, 35] self.robot.min_force", "range(0, nbCables): childname = 'cableL' + str(i) theta = 1.57", "base of the trunk by adding constraints in a region", "splib.numerics import Quat, Vec3 from sofacontrol import measurement_models path =", "= actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in", "region of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15,", "in range(len(self.actuatorsParam)): cable = actuators.addChild(self.actuatorsParam[i]['withName']) cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry']) cable.addObject('CableConstraint', name='cable', indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))),", "position=pullPoint[i] + [pos.toList() for pos in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\",", "empty child node to store this rendering model. fingerVisu =", "self.gravity = [0., 0., -9810.] 
rotation = [90, 0.0, 0.0]", "\" + \"-47.5 12.5 2.5 \" + \"-62.5 12.5 2.5", "src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject', name='tetras',", "a mass to the model self.robot.addObject('UniformMass', totalMass=0.042) # Add a", "[0., -length1, 0.], [length1, 0., 0.]] direction = Vec3(0., length2", "0], [5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## #", "[[0, 97, 45]], 'withAPullPointLocation': [0, 10, 30] }, {'withName': 'B',", "# Fix the base of the trunk by adding constraints", "1.57 * i q = Quat(0., 0., sin(theta / 2.),", "# Visualization # ########################################## trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path +", "is passing by. cable.addObject('MechanicalObject', name='meca', position=( \"-17.5 12.5 2.5 \"", "where the cable is passing by. cable.addObject('MechanicalObject', name='meca', position=( \"-17.5", "= [0., 0., 9810.] self.robot.min_force = [0.] * 8 #", "lengthTrunk) direction.normalize() nbCables = 4 actuators = self.robot.addChild('actuators') for i", "name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass',", "an stl file. 
fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel', template='Vec3d', color=[1.,", "45]], 'withAPullPointLocation': [-10, 0, 30] }, {'withName': 'C', 'withCableGeometry': [[0,", "}, {'withName': 'C', 'withCableGeometry': [[0, -97, 45]], 'withAPullPointLocation': [0, -10,", "self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', method='large', name='forcefield', poissonRatio=poissonRatio, youngModulus=youngModulus) # Fix the base", "Add a BarycentricMapping to deform rendering model in way that", "__init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force = [0, 0, 0,", "0., sin(theta / 2.), cos(theta / 2.)) position = [[0.,", "self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField',", "class Trunk4Cables(Trunk): def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force =", "+ \"-83.5 12.5 4.5 \" + \"-85.5 12.5 6.5 \"", "self.actuator_list ########################################## # Visualization # ########################################## # In Sofa, visualization", "'withAPullPointLocation': [10, 0, 30] } ] actuators = self.robot.addChild('actuators') for", "[0, 0, 0, 0] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader',", "src='@loader') # Gives a mass to the model self.robot.addObject('UniformMass', totalMass=0.042)", "45]], 'withAPullPointLocation': [0, -10, 30] }, {'withName': 'D', 'withCableGeometry': [[97,", "'/mesh/trunk.vtk') self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1:", "in range(0, nbCables): childname = 'cableS' + str(i) 
theta =", "that follow the ones of the parent mechanical model. fingerVisu.addObject('BarycentricMapping')", "for k in range(0, 9, 2): v = Vec3(direction[0], direction[1]", "Vec3(0., length2 - length1, lengthTrunk) direction.normalize() nbCables = 4 actuators", "of the trunk by adding constraints in a region of", "to the finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable)", "self.robot.addChild('actuators') cable = actuators.addChild('cable') # This create a MechanicalObject, a", "sin import Sofa.Core from splib.numerics import Quat, Vec3 from sofacontrol", "* 10 for k in range(0, 9, 2): v =", "the base of the trunk by adding constraints in a", "-15, -40, 15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ##########################################", "Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation,", "[[0., 0., 0.]] * 10 for k in range(0, 9,", "i in range(0, nbCables): childname = 'cableS' + str(i) theta", "self.robot.addChild('VisualModel') # Add to this empty node a rendering model", "object with a name. # the indices are referring to", "premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader', filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation)", "-10, 30] }, {'withName': 'D', 'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation':", "Cable # ########################################## # This creates a new node in", "indice is where the pullPoint is connected. 
cable.addObject('CableConstraint', name=\"cable\", indices=list(range(14)),", "pos in position]) cableS.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(10)), maxPositiveDisp='40', maxDispVariation=\"1\",", "be mapped # to the finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping',", "self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075)", "set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection',", "pullPoint = [[0., length1, 0.], [-length1, 0., 0.], [0., -length1,", "solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness)) self.robot.addObject('SparseLDLSolver', name='preconditioner') self.robot.addObject('GenericConstraintCorrection', solverName=\"preconditioner\")", "########################################## # This creates a new node in the scene.", "indices are referring to the MechanicalObject's positions. 
# The last", "as it will create a bi-directional link # between the", "name self.robot = Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\",", "Gives a mass to the model self.robot.addObject('UniformMass', totalMass=0.042) # Add", "indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) # This", "self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms') self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=totalMass,", "= Sofa.Core.Node(name) # set-up solvers self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder=\"0\", rayleighMass=str(rayleighMass), rayleighStiffness=str(rayleighStiffness))", "45]], 'withAPullPointLocation': [0, 10, 30] }, {'withName': 'B', 'withCableGeometry': [[-97,", "1] = v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject', name='meca', position=pullPoint[i] +", "'D', 'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation': [10, 0, 30] }", "* (k / 2) + 21) position[k] = v.rotateFromQuat(q) v", "it will create a bi-directional link # between the cable's", "dt=0.01): super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628 self.gravity", "[[0., length1, 0.], [-length1, 0., 0.], [0., -length1, 0.], [length1,", "trunkVisu = self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1.,", "finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') self.actuator_list.append(cable.cable) 
self.robot.actuator_list =", "link # between the cable's DoFs and the finger's ones", "'withCableGeometry': [[97, 0, 45]], 'withAPullPointLocation': [10, 0, 30] } ]", "interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]],", "Create a CableConstraint object with a name. # the indices", "0.0, 0.0] translation = [0.0, 0.0, 35] self.robot.min_force = [0,", "__init__(self, name='Trunk', all_cables=True): super(Trunk, self).__init__(name=name) self.nb_nodes = 709 self.gravity =", "positions specifying # the points where the cable is passing", "self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7,", "12.5 \" + \"-17.5 12.5 12.5 \")) # Create a", "believe) # self.robot.addObject('MechanicalObject', src='@loader') # Gives a mass to the", "+= childname + '/cable,' self.actuator_list.append(cableL.cable) if all_cables: for i in", "pullPoint=self.actuatorsParam[i]['withAPullPointLocation'], valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False,", "# Cable # ########################################## actuator_names = '' length1 = 10.", "/ 2.), cos(theta / 2.)) position = [[0., 0., 0.]]", "[0, 10, 30] }, {'withName': 'B', 'withCableGeometry': [[-97, 0, 45]],", "str(i) theta = 1.57 * i q = Quat(0., 0.,", "12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) # This create a", "20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable #", "0., 0.], [0., -length1, 0.], [length1, 0., 0.]] direction =", "positions. 
# The last indice is where the pullPoint is", "12.5 \" + \"-62.5 12.5 12.5 \" + \"-47.5 12.5", "27) position[k + 1] = v.rotateFromQuat(q) cableS = actuators.addChild(childname) cableS.addObject('MechanicalObject',", "0, 0, 0] # Without premultiplication with dt self.robot.addObject('MeshVTKLoader', name='loader',", "4.5 \" + \"-85.5 12.5 6.5 \" + \"-85.5 12.5", "97, 45]], 'withAPullPointLocation': [0, 10, 30] }, {'withName': 'B', 'withCableGeometry':", "0.], [-length1, 0., 0.], [0., -length1, 0.], [length1, 0., 0.]]", "def __init__(self, name='Trunk4Cables'): super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force = [0, 0,", "specifying # the points where the cable is passing by.", "fingerVisu = self.robot.addChild('VisualModel') # Add to this empty node a", "name=\"cable\", indices=list(range(14)), pullPoint=\"0.0 12.5 2.5\", valueType='force', minForce=self.robot.min_force[0] * self.robot.dt.value) #", "name='FEM', method='large', poissonRatio=0.45, youngModulus=450) # Fix the base of the", "Sofa, visualization is handled by adding a rendering model. #", "the model self.robot.addObject('UniformMass', totalMass=0.042) # Add a TetrahedronFEMForceField componant which", "= [0., 0., -9810.] 
rotation = [90, 0.0, 0.0] translation", "stiffness='1e12') ########################################## # Cable # ########################################## actuator_names = '' length1", "+ [pos.toList() for pos in position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\",", "all_cables=False) self.robot.min_force = [0, 0, 0, 0] # Without premultiplication", "'B', 'withCableGeometry': [[-97, 0, 45]], 'withAPullPointLocation': [-10, 0, 30] },", "+ '/cable,' self.actuator_list.append(cableL.cable) if all_cables: for i in range(0, nbCables):", "dt=0.01): self.name = name self.robot = Sofa.Core.Node(name) # set-up solvers", "0, 0], [5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ##########################################", "self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## diamondVisu =", "of a cable it is a set of positions specifying", "diamondVisu = self.robot.addChild('VisualModel') diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7,", "self.gravity = [0., 0., 9810.] self.robot.min_force = [0.] 
* 8", "youngModulus=450) # Fix the base of the trunk by adding", "componant holding the degree of freedom of our # mechanical", "\" + \"-83.5 12.5 10.5 \" + \"-77.5 12.5 12.5", "from math import sin import Sofa.Core from splib.numerics import Quat,", "2.)) position = [[0., 0., 0.]] * 20 for k", "of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15,", "totalMass=0.042) # Add a TetrahedronFEMForceField componant which implement an elastic", "self.robot.addObject('TetrahedronSetGeometryAlgorithms') self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add", "filename=path + \"/mesh/diamond.vtu\", rotation=rotation, translation=translation) self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container') self.robot.addObject('TetrahedronSetTopologyModifier') self.robot.addObject('TetrahedronSetTopologyAlgorithms')", "# to the finger and vice-versa; cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')", "name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices',", "name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') self.robot.addObject('UniformMass', totalMass=0.075) # Add a TetrahedronFEMForceField", "= [0.0, 0.0, 35] self.robot.min_force = [0, 0, 0, 0]", "super(Trunk4Cables, self).__init__(name=name, all_cables=False) self.robot.min_force = [0, 0, 0, 0] #", "actuators.addChild('cable') # This create a MechanicalObject, a componant holding the", "/ 2) + 27) position[k + 1] = v.rotateFromQuat(q) cableL", "the cable's DoFs will be mapped # to the finger", "= [0, 0, 0, 0] # Without premultiplication with dt", "and the finger's ones so that movements of the cable's", "position = [[0., 0., 0.]] * 20 for k in", 
"rayleighStiffness=rayleighStiffness, dt=dt) self.nb_nodes = 1628 self.gravity = [0., 0., -9810.]", "class TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01): self.name =", "cable it is a set of positions specifying # the", "This create a BarycentricMapping. A BarycentricMapping is a key element", "10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable #", "# self.robot.addObject('MechanicalObject', src='@loader') # Gives a mass to the model", "12.5 8.5 \" + \"-83.5 12.5 10.5 \" + \"-77.5", "\"-62.5 12.5 12.5 \" + \"-47.5 12.5 12.5 \" +", "\" + \"-17.5 12.5 12.5 \")) # Create a CableConstraint", "Visualization # ########################################## # In Sofa, visualization is handled by", "name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableL.cable) if", "box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')", "points where the cable is passing by. 
cable.addObject('MechanicalObject', name='meca', position=(", "} ] actuators = self.robot.addChild('actuators') for i in range(len(self.actuatorsParam)): cable", "+ \"-47.5 12.5 2.5 \" + \"-62.5 12.5 2.5 \"", "TetrahedronFEMForceField componant which implement an elastic material model solved using", "actuators.addChild(childname) cableL.addObject('MechanicalObject', name='meca', position=pullPoint[i] + [pos.toList() for pos in position])", "= self.robot.addChild('actuators') for i in range(0, nbCables): childname = 'cableL'", "theta = 1.57 * i q = Quat(0., 0., sin(theta", "+ \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8]) trunkVisu.addObject('BarycentricMapping') class", "name=\"Mapping\", mapForces=False, mapMasses=False) self.actuator_list.append(cable.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization", "of interest (ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10,", "15]], drawBoxes=False) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12') ########################################## # Cable # ##########################################", "a TetrahedronFEMForceField componant which implement an elastic material model solved", "-40, 15, 15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## #", "loaded from an stl file. 
fingerVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/finger.stl\") fingerVisu.addObject('OglModel',", "self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') # Option 2: Equivalent to", "position]) cableL.addObject('CableConstraint', template='Vec3d', name=\"cable\", hasPullPoint=\"0\", indices=list(range(21)), maxPositiveDisp='70', maxDispVariation=\"1\", valueType='force', minForce=self.robot.min_force[i]", "def __init__(self, name='Finger'): super(Finger, self).__init__(name=name) self.nb_nodes = 158 self.robot.min_force =", "points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable # ########################################## self.actuatorsParam = [", "nbCables): childname = 'cableL' + str(i) theta = 1.57 *", "= [0., -9810., 0.] # default self.dt = dt def", "node is appended to the finger's node. actuators = self.robot.addChild('actuators')", "{'withName': 'A', 'withCableGeometry': [[0, 97, 45]], 'withAPullPointLocation': [0, 10, 30]", "In Sofa, visualization is handled by adding a rendering model.", "15, 10], drawBoxes=True) self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.<EMAIL>', stiffness='1e12') ########################################## # Cable #", "(ROI) self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False)", "self.robot.addObject('TetrahedronSetGeometryAlgorithms') # Option 1: self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false', showIndicesScale='4e-5') #", "self.robot.dt.value) cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false') actuator_names += childname + '/cable,'", "self.actuator_list.append(cableS.cable) self.robot.actuator_list = self.actuator_list ########################################## # Visualization # ########################################## trunkVisu", "12.5 2.5 \" + \"-83.5 12.5 4.5 
\" + \"-85.5", "diamondVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7],", "+ \"-62.5 12.5 2.5 \" + \"-77.5 12.5 2.5 \"", "a BarycentricMapping to deform rendering model in way that follow", "path = os.path.dirname(os.path.abspath(__file__)) class TemplateEnvironment: def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1,", "valueType='force', hasPullPoint=True, minForce=self.robot.min_force[i] * self.robot.dt.value ) cable.addObject('BarycentricMapping', name=\"Mapping\", mapForces=False, mapMasses=False)", "filename=path + \"/mesh/diamond.stl\") diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7], updateNormals=False)", "12.5 2.5 \" + \"-62.5 12.5 2.5 \" + \"-77.5", "[0.0, 0.0, 35] self.robot.min_force = [0, 0, 0, 0] #", "mapForces='false', mapMasses='false') actuator_names += childname + '/cable,' self.actuator_list.append(cableL.cable) if all_cables:", "= self.robot.addChild('VisualModel') trunkVisu.addObject('MeshSTLLoader', filename=path + \"/mesh/trunk.stl\") trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1.,", "between the cable's DoFs and the finger's ones so that" ]
[ "configurations of the project \"\"\" RSC_YEARS = [1660, 1670, 1680,", "stay within the range of e-4 (originally: 1e-4) 'opt_tol': 1e-9,", "a test dict, but that's not implemented! 'opt_gpu': False, #", "e-4 (originally: 1e-4) 'opt_tol': 1e-9, # no limits 'opt_round_g': False,", "but that's not implemented! 'opt_gpu': False, # GPU optimization not", "1780, 1790, 1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870,", "300, # no limits; normally converges within 150 iterations 'fit_tol':", "a large value 'fit_print_every': 1, # no limits 'fit_verbose': True,", "1830, 1840, 1850, 1860, 1870, 1880, 1890, 1900, 1910, 1920]", "(1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG = {", "object 'metric': \"cosine\", # 'euclidian', 'normalize_vecs': \"both\", # 'mean', 'whiten',", "'median' 'score_type': \"coupling\", # #TODO fill in the rest of", "not tested # parameters for calling fit() 'fit_maxiter': 300, #", "1690, 1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780,", "Alternatives # parameters passed to the GWOT object 'metric': \"cosine\",", "# 'custom', 'zipf' 'share_vocs':False, # True 'size':1000, # 100 is", "fit() 'fit_maxiter': 300, # no limits; normally converges within 150", "small couplings (for projection) # parameters to be passed to", "# parameters passed to the GWOT object 'metric': \"cosine\", #", "(1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG = { # Alternatives # parameters", "'size':1000, # 100 is small, 1e4 'max_anchors':100, # used with", "large value 'fit_print_every': 1, # no limits 'fit_verbose': True, #", "1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890,", "information and default configurations of the project \"\"\" RSC_YEARS =", "'opt_tol': 1e-9, # no limits 'opt_round_g': False, # True 'opt_compute_accuracy':", "(1700,1900)] COUPLING_CONFIG = { # Alternatives # parameters passed to", "1e-9, # no limits 'fit_plot_every': 100000, # normally 20; 'deactivate'", "the project 
\"\"\" RSC_YEARS = [1660, 1670, 1680, 1690, 1700,", "optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic': True, # False 'opt_entreg':", "= [(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890),", "1790, 1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880,", "the GWOT object 'metric': \"cosine\", # 'euclidian', 'normalize_vecs': \"both\", #", "#TODO fill in the rest of the options in the", "'euclidian', 'normalize_vecs': \"both\", # 'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\", #", "\"square_loss\", # 'kl_loss' 'opt_entropic': True, # False 'opt_entreg': 5e-4, #", "that's not implemented! 'opt_gpu': False, # GPU optimization not tested", "'deactivate' the file spam by choosing a large value 'fit_print_every':", "# no limits 'opt_round_g': False, # True 'opt_compute_accuracy': False, #", "parameters passed to the GWOT object 'metric': \"cosine\", # 'euclidian',", "parameters for calling fit() 'fit_maxiter': 300, # no limits; normally", "\"mean\", # 'max', 'median' 'score_type': \"coupling\", # #TODO fill in", "GPU optimization not tested # parameters for calling fit() 'fit_maxiter':", "'opt_gpu': False, # GPU optimization not tested # parameters for", "# GPU optimization not tested # parameters for calling fit()", "(1800,1900), (1700,1900)] COUPLING_CONFIG = { # Alternatives # parameters passed", "1920] # cf. 
Chapter 4.4.1 of the thesis SPACE_PAIR_SELECTION =", "'kl_loss' 'opt_entropic': True, # False 'opt_entreg': 5e-4, # stay within", "no limits 'fit_plot_every': 100000, # normally 20; 'deactivate' the file", "limits 'fit_verbose': True, # False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" }", "no limits; normally converges within 150 iterations 'fit_tol': 1e-9, #", "converges within 150 iterations 'fit_tol': 1e-9, # no limits 'fit_plot_every':", "iterations 'fit_tol': 1e-9, # no limits 'fit_plot_every': 100000, # normally", "fill in the rest of the options in the comments", "in the comments 'adjust': None, # 'csls', ... 'distribs': \"uniform\",", "1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890, 1900,", "1e-9, # no limits 'opt_round_g': False, # True 'opt_compute_accuracy': False,", "not implemented! 'opt_gpu': False, # GPU optimization not tested #", "file contains meta information and default configurations of the project", "1880, 1890, 1900, 1910, 1920] # cf. Chapter 4.4.1 of", "# True 'opt_compute_accuracy': False, # True would require a test", "dict, but that's not implemented! 'opt_gpu': False, # GPU optimization", "no limits 'fit_verbose': True, # False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\"", "\"coupling\", # #TODO fill in the rest of the options", "project \"\"\" RSC_YEARS = [1660, 1670, 1680, 1690, 1700, 1710,", "the options in the comments 'adjust': None, # 'csls', ...", "# 'max', 'median' 'score_type': \"coupling\", # #TODO fill in the", "cf. 
Chapter 4.4.1 of the thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760),", "by choosing a large value 'fit_print_every': 1, # no limits", "1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890, 1900, 1910,", "# parameters to be passed to the optimizer 'opt_loss_fun': \"square_loss\",", "[1660, 1670, 1680, 1690, 1700, 1710, 1720, 1730, 1740, 1750,", "\"both\", # 'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\", # 'max', 'median'", "passed to the GWOT object 'metric': \"cosine\", # 'euclidian', 'normalize_vecs':", "1670, 1680, 1690, 1700, 1710, 1720, 1730, 1740, 1750, 1760,", "# 'csls', ... 'distribs': \"uniform\", # 'custom', 'zipf' 'share_vocs':False, #", "'distribs': \"uniform\", # 'custom', 'zipf' 'share_vocs':False, # True 'size':1000, #", "'fit_plot_every': 100000, # normally 20; 'deactivate' the file spam by", "'normalize_dists': \"mean\", # 'max', 'median' 'score_type': \"coupling\", # #TODO fill", "'opt_round_g': False, # True 'opt_compute_accuracy': False, # True would require", "options in the comments 'adjust': None, # 'csls', ... 'distribs':", "1860, 1870, 1880, 1890, 1900, 1910, 1920] # cf. Chapter", "'fit_print_every': 1, # no limits 'fit_verbose': True, # False 'fit_save_plots':", "the rest of the options in the comments 'adjust': None,", "to the GWOT object 'metric': \"cosine\", # 'euclidian', 'normalize_vecs': \"both\",", "range of e-4 (originally: 1e-4) 'opt_tol': 1e-9, # no limits", "to the optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic': True, #", "(1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG = { #", "True 'size':1000, # 100 is small, 1e4 'max_anchors':100, # used", "150 iterations 'fit_tol': 1e-9, # no limits 'fit_plot_every': 100000, #", "'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform', 'zipf', 'custom']", "True, # False 'opt_entreg': 5e-4, # stay within the range", "the comments 'adjust': None, # 'csls', ... 
'distribs': \"uniform\", #", "limits 'fit_plot_every': 100000, # normally 20; 'deactivate' the file spam", "limits; normally converges within 150 iterations 'fit_tol': 1e-9, # no", "(1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900),", "default configurations of the project \"\"\" RSC_YEARS = [1660, 1670,", "False, # GPU optimization not tested # parameters for calling", "= ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS = [\"all\", \"unsup_bi\", \"unsup_mono\", \"dis_tech\"]", "# normally 20; 'deactivate' the file spam by choosing a", "calling fit() 'fit_maxiter': 300, # no limits; normally converges within", "'max_anchors':100, # used with small couplings (for projection) # parameters", "20; 'deactivate' the file spam by choosing a large value", "None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS", "test dict, but that's not implemented! 'opt_gpu': False, # GPU", "'adjust': None, # 'csls', ... 'distribs': \"uniform\", # 'custom', 'zipf'", "{ # Alternatives # parameters passed to the GWOT object", "1, # no limits 'fit_verbose': True, # False 'fit_save_plots': None", "5e-4, # stay within the range of e-4 (originally: 1e-4)", "100000, # normally 20; 'deactivate' the file spam by choosing", "of the options in the comments 'adjust': None, # 'csls',", "1680, 1690, 1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770,", "implemented! 'opt_gpu': False, # GPU optimization not tested # parameters", "# cf. 
Chapter 4.4.1 of the thesis SPACE_PAIR_SELECTION = [(1740,1750),", "False, # True 'opt_compute_accuracy': False, # True would require a", "\"\"\" RSC_YEARS = [1660, 1670, 1680, 1690, 1700, 1710, 1720,", "normally converges within 150 iterations 'fit_tol': 1e-9, # no limits", "'opt_entropic': True, # False 'opt_entreg': 5e-4, # stay within the", "True 'opt_compute_accuracy': False, # True would require a test dict,", "thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830),", "'whiten', 'whiten_zca' 'normalize_dists': \"mean\", # 'max', 'median' 'score_type': \"coupling\", #", "# stay within the range of e-4 (originally: 1e-4) 'opt_tol':", "1e4 'max_anchors':100, # used with small couplings (for projection) #", "optimization not tested # parameters for calling fit() 'fit_maxiter': 300,", "# #TODO fill in the rest of the options in", "# parameters for calling fit() 'fit_maxiter': 300, # no limits;", "projection) # parameters to be passed to the optimizer 'opt_loss_fun':", "\"uniform\", # 'custom', 'zipf' 'share_vocs':False, # True 'size':1000, # 100", "'opt_entreg': 5e-4, # stay within the range of e-4 (originally:", "'share_vocs':False, # True 'size':1000, # 100 is small, 1e4 'max_anchors':100,", "\"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS = [\"all\",", "= { # Alternatives # parameters passed to the GWOT", "1730, 1740, 1750, 1760, 1770, 1780, 1790, 1800, 1810, 1820,", "in the rest of the options in the comments 'adjust':", "the range of e-4 (originally: 1e-4) 'opt_tol': 1e-9, # no", "'metric': \"cosine\", # 'euclidian', 'normalize_vecs': \"both\", # 'mean', 'whiten', 'whiten_zca'", "'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic': True, # False 'opt_entreg': 5e-4,", "(1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG = { # Alternatives #", "True, # False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES 
=", "1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790, 1800,", "False, # True would require a test dict, but that's", "meta information and default configurations of the project \"\"\" RSC_YEARS", "# no limits; normally converges within 150 iterations 'fit_tol': 1e-9,", "would require a test dict, but that's not implemented! 'opt_gpu':", "True would require a test dict, but that's not implemented!", "(1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)]", "False 'opt_entreg': 5e-4, # stay within the range of e-4", "small, 1e4 'max_anchors':100, # used with small couplings (for projection)", "RSC_YEARS = [1660, 1670, 1680, 1690, 1700, 1710, 1720, 1730,", "be passed to the optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic':", "tested # parameters for calling fit() 'fit_maxiter': 300, # no", "# 'kl_loss' 'opt_entropic': True, # False 'opt_entreg': 5e-4, # stay", "value 'fit_print_every': 1, # no limits 'fit_verbose': True, # False", "\"\"\" This file contains meta information and default configurations of", "passed to the optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic': True,", "\"cosine\", # 'euclidian', 'normalize_vecs': \"both\", # 'mean', 'whiten', 'whiten_zca' 'normalize_dists':", "1760, 1770, 1780, 1790, 1800, 1810, 1820, 1830, 1840, 1850,", "(1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG =", "} DIST_SHAPES = ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS = [\"all\", \"unsup_bi\",", "(1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG = { # Alternatives", "This file contains meta information and default configurations of the", "1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790, 1800, 1810,", "parameters to be passed to the optimizer 'opt_loss_fun': \"square_loss\", #", "couplings (for projection) # parameters to be passed to the", "Chapter 4.4.1 
of the thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710),", "'custom', 'zipf' 'share_vocs':False, # True 'size':1000, # 100 is small,", "False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform', 'zipf',", "is small, 1e4 'max_anchors':100, # used with small couplings (for", "'max', 'median' 'score_type': \"coupling\", # #TODO fill in the rest", "of the thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770),", "the thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800),", "(originally: 1e-4) 'opt_tol': 1e-9, # no limits 'opt_round_g': False, #", "'fit_tol': 1e-9, # no limits 'fit_plot_every': 100000, # normally 20;", "to be passed to the optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss'", "# 'euclidian', 'normalize_vecs': \"both\", # 'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\",", "and default configurations of the project \"\"\" RSC_YEARS = [1660,", "None, # 'csls', ... 'distribs': \"uniform\", # 'custom', 'zipf' 'share_vocs':False,", "with small couplings (for projection) # parameters to be passed", "no limits 'opt_round_g': False, # True 'opt_compute_accuracy': False, # True", "= [1660, 1670, 1680, 1690, 1700, 1710, 1720, 1730, 1740,", "1890, 1900, 1910, 1920] # cf. 
Chapter 4.4.1 of the", "'zipf' 'share_vocs':False, # True 'size':1000, # 100 is small, 1e4", "[(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800),", "(for projection) # parameters to be passed to the optimizer", "'score_type': \"coupling\", # #TODO fill in the rest of the", "choosing a large value 'fit_print_every': 1, # no limits 'fit_verbose':", "# no limits 'fit_plot_every': 100000, # normally 20; 'deactivate' the", "SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860),", "'normalize_vecs': \"both\", # 'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\", # 'max',", "# 'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\", # 'max', 'median' 'score_type':", "limits 'opt_round_g': False, # True 'opt_compute_accuracy': False, # True would", "4.4.1 of the thesis SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760), (1680,1710), (1710,1740),", "the file spam by choosing a large value 'fit_print_every': 1,", "# False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform',", "... 'distribs': \"uniform\", # 'custom', 'zipf' 'share_vocs':False, # True 'size':1000,", "(1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890), (1700,1800), (1800,1900), (1700,1900)] COUPLING_CONFIG", "rest of the options in the comments 'adjust': None, #", "1770, 1780, 1790, 1800, 1810, 1820, 1830, 1840, 1850, 1860,", "# True would require a test dict, but that's not", "'mean', 'whiten', 'whiten_zca' 'normalize_dists': \"mean\", # 'max', 'median' 'score_type': \"coupling\",", "of the project \"\"\" RSC_YEARS = [1660, 1670, 1680, 1690,", "used with small couplings (for projection) # parameters to be", "1910, 1920] # cf. 
Chapter 4.4.1 of the thesis SPACE_PAIR_SELECTION", "100 is small, 1e4 'max_anchors':100, # used with small couplings", "# False 'opt_entreg': 5e-4, # stay within the range of", "require a test dict, but that's not implemented! 'opt_gpu': False,", "# no limits 'fit_verbose': True, # False 'fit_save_plots': None #", "# 100 is small, 1e4 'max_anchors':100, # used with small", "1840, 1850, 1860, 1870, 1880, 1890, 1900, 1910, 1920] #", "comments 'adjust': None, # 'csls', ... 'distribs': \"uniform\", # 'custom',", "'whiten_zca' 'normalize_dists': \"mean\", # 'max', 'median' 'score_type': \"coupling\", # #TODO", "contains meta information and default configurations of the project \"\"\"", "'csls', ... 'distribs': \"uniform\", # 'custom', 'zipf' 'share_vocs':False, # True", "within 150 iterations 'fit_tol': 1e-9, # no limits 'fit_plot_every': 100000,", "1750, 1760, 1770, 1780, 1790, 1800, 1810, 1820, 1830, 1840,", "1e-4) 'opt_tol': 1e-9, # no limits 'opt_round_g': False, # True", "1850, 1860, 1870, 1880, 1890, 1900, 1910, 1920] # cf.", "normally 20; 'deactivate' the file spam by choosing a large", "1740, 1750, 1760, 1770, 1780, 1790, 1800, 1810, 1820, 1830,", "# Alternatives # parameters passed to the GWOT object 'metric':", "spam by choosing a large value 'fit_print_every': 1, # no", "the optimizer 'opt_loss_fun': \"square_loss\", # 'kl_loss' 'opt_entropic': True, # False", "# True 'size':1000, # 100 is small, 1e4 'max_anchors':100, #", "of e-4 (originally: 1e-4) 'opt_tol': 1e-9, # no limits 'opt_round_g':", "for calling fit() 'fit_maxiter': 300, # no limits; normally converges", "DIST_SHAPES = ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS = [\"all\", \"unsup_bi\", \"unsup_mono\",", "COUPLING_CONFIG = { # Alternatives # parameters passed to the", "# used with small couplings (for projection) # parameters to", "GWOT object 'metric': \"cosine\", # 'euclidian', 'normalize_vecs': \"both\", # 'mean',", "1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790,", 
"'opt_compute_accuracy': False, # True would require a test dict, but", "file spam by choosing a large value 'fit_print_every': 1, #", "within the range of e-4 (originally: 1e-4) 'opt_tol': 1e-9, #", "1900, 1910, 1920] # cf. Chapter 4.4.1 of the thesis", "'fit_maxiter': 300, # no limits; normally converges within 150 iterations", "# \"/my_dir/my_optimizer_plots\" } DIST_SHAPES = ['uniform', 'zipf', 'custom'] SHIFT_EXPERIMENTS =", "'fit_verbose': True, # False 'fit_save_plots': None # \"/my_dir/my_optimizer_plots\" } DIST_SHAPES", "1870, 1880, 1890, 1900, 1910, 1920] # cf. Chapter 4.4.1" ]
[ "Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred start\", ) parser.add_argument(\"--dow\", action='store_true',)", "test = sel_partition_data(dataset, test_start_date, test_end_date) return train, val, test def", "n_seg, len_seq, 2] \"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars]", "x_data, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) x_scl, x_std,", "= np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len) split = np.split(data_array, indices_or_sections=idx,", "dataset: [xr dataset] input or output data :param std: [xr", "sel_partition_data(dataset, test_start_date, test_end_date) return train, val, test def split_into_batches(data_array, seq_len=365,", "absolute_import from __future__ import division from __future__ import print_function from", "periods) :param val_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "Do you want to overwrite it? (y/n)')).lower().strip() if reply[0] !=", "os.path import pandas as pd import numpy as np import", "\"YYYY-MM-DD\"; date(s) to end train period (can have multiple discontinuos", "to be batched :param seq_len: [int] length of sequences (i.e.,", "list, or tuple\") def separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date, val_end_date,", "1 / (1 + np.exp(-adj[adj != 0])) I = np.eye(adj.shape[0])", "y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values,", "start = exclude_grp.get(\"start_date\") if start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end", "dates and national seg ids for testing data [n_yrs, n_seg,", "training and testing data are scaled to have a std", "if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset): std =", "if isinstance(start_dates, str): if isinstance(end_dates, str): return 
dataset.sel(date=slice(start_dates, end_dates)) else:", "dataset, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ): \"\"\" separate", ":param log_q: [bool] whether or not to take the log", "def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): # I need one variable", "scaled = (dataset - mean) / (std + 1e-10) check_if_finite(std)", "test period (can have multiple discontinuos periods) \"\"\" train =", "\"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\":", "train_end_date) val = sel_partition_data(dataset, val_start_date, val_end_date) test = sel_partition_data(dataset, test_start_date,", "read in and process data into training and testing datasets.", "y = False, period = np.nan): \"\"\" convert xarray dataset", "= sel_partition_data(dataset, test_start_date, test_end_date) return train, val, test def split_into_batches(data_array,", "\"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\",", "= np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat) if out_file: out_dm =", "matrix \"\"\" adj_matrices = np.load(infile) adj = adj_matrices[dist_type] adj_full =", "offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),", "period (can have multiple discontinuos periods) :return: dataset of just", "= [adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm,", "in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return row_col_names, sensor_id_to_ind, df #check", "a mean of zero :param dataset: [xr dataset] input or", "if scaling test data with dims :param mean: [xr dataset]", "[] for i in 
range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice))", "= D ** -1.0 D_inv = np.diag(D_inv) A_hat = np.matmul(D_inv,", "along with the means and standard deviations of the training", "xr.Dataset) or not isinstance(mean, xr.Dataset): std = dataset.std(skipna=True) mean =", "second will be 182-547) :return: [numpy array] batched data with", "train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ): \"\"\" separate the", "seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset,", "end date \"\"\" start = exclude_grp.get(\"start_date\") if start: start =", "for s in split if s.shape[1] == seq_len]) combined =", "del ds[\"site_id\"] obs = xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]]", "are in the testing. :param dataset: [xr dataset] input or", "= xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename(", "val_end_date, test_start_date, test_end_date, x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length = 365,", "output observation data for the training period 'y_trn_obs_std': standard deviation", "the distance matrix by seg_id_nat :return: \"\"\" df = pd.DataFrame(mat,", "values will be written :returns: training and testing data along", "training data [n_out] 'y_trn_obs_mean': mean of the observation training data", "obs_flow_file, pretrain_file, #distfile, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None,", "\"seg_outflow\"} ) return obs def reshape_for_training(data): \"\"\" reshape the data", "or not to take the log of discharge in training", "seg_id_nat ** :param infile: :param dist_type: [str] type of distance", "zero scaled = (dataset - mean) / (std + 1e-10)", "small number in case there is a 
std of zero", "variables. :param catch_prop_file: [str] the path to the catchment properties", "convert xr.dataset to numpy array dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr", "train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ): \"\"\" separate the train", "adj matrix. **The resulting matrix is sorted by seg_id_nat **", "date dimension :param start_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',", "to the start and end dates. This assumes your training", "case there is a std of zero scaled = (dataset", "must be either str, list, or tuple\") def separate_trn_tst( dataset,", "period (can have multiple discontinuos periods) \"\"\" train = sel_partition_data(dataset,", "the data from a date range or a set of", "_, _ = scale(y_pre_trn, y_std, y_mean) else: _, y_std, y_mean", "output data 'y_trn_pre': batched, scaled, and centered output data for", "= np.eye(adj.shape[0]) A_hat = adj.copy() + I D = np.sum(A_hat,", "is a std of zero scaled = (dataset - mean)", "sorted by seg_id_nat ** :param infile: :param dist_type: [str] type", "division from __future__ import print_function from __future__ import unicode_literals import", "np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'],", "format multiple observation files. 
we read in the pretrain data", "resulting matrix is sorted by seg_id_nat ** :param infile: :param", "(batch_size), nfeat] \"\"\" combined = [] for i in range(int(1", "representing the exclude group from the exclude yml file :return:", "coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\":", "not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset): std = dataset.std(skipna=True)", "and std y_obs_trn, y_std, y_mean = scale(y_obs_trn) y_pre_trn, _, _", "into training and testing datasets. the training and testing data", "_ = scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _, _ = scale(x_tst,", "parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred start\", ) parser.add_argument(\"--dow\", action='store_true',) args", "it as is if dataset.date.size == 0: return arr #", "offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),", "check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'),", "'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'],", "path to the distance matrix .npz file :param train_start_date: [str", "generate_train_val_test(args) ##### Reformat our inputs to match theirs. 
df =", "seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset,", "= dataset.std(skipna=True) mean = dataset.mean(skipna=True) # adding small number in", "= -adj mean_adj = np.mean(adj[adj != 0]) std_adj = np.std(adj[adj", "data and get the mean and std y_obs_trn, y_std, y_mean", "date(s) to end train period (can have multiple discontinuos periods)", "of sequences (i.e., 365) :param offset: [float] 0-1, how to", "train data from the test data according to the start", "= False, period = np.nan): \"\"\" convert xarray dataset into", "= separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, )", "x_trn, x_val, x_tst = separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date, val_end_date,", "prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',", "+ I D = np.sum(A_hat, axis=1) D_inv = D **", "just those dates \"\"\" # if it just one date", "= adj[adj != 0] - mean_adj adj[adj != 0] =", "= separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, )", "data [n_out] 'y_trn_obs_mean': mean of the observation training data [n_out]", "data is in one continuous block and all the dates", "\"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\")", "[xr dataset] data to be batched :param seq_len: [int] length", "= scale(y_obs_trn) y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean) else:", "entire period of record of SNTemp [n_samples, seq_len, n_out] 'y_obs_trn':", "[nbatch, nseg, seq_len, nfeat] batched = split_into_batches(arr, seq_len=seq_len, offset=offset) #", "x_trn_scl, _, _ = 
scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _, _", "x_mean = scale(x_data) x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)", "np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'),", "dims [nbatches, nseg, seq_len (batch_size), nfeat] \"\"\" combined = []", "== False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'],", "we have the same indexing. :param obs_files: [list] list of", "== seq_len]) combined = np.asarray(combined) return combined def read_multiple_obs(obs_files, x_data):", "(dataset - mean) / (std + 1e-10) check_if_finite(std) check_if_finite(mean) return", "test_start_date, test_end_date, ) x_scl, x_std, x_mean = scale(x_data) x_trn_scl, _,", "obs_flow_file], x_data) y_obs = y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val,", "start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if end: end", "= next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name +", "\"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file", "np.asarray(combined) return combined def read_multiple_obs(obs_files, x_data): \"\"\" read and format", "start train period (can have multiple discontinuos periods) :param train_end_date:", "multiple discontinuos periods) :param end_dates: [str or list] fmt: \"YYYY-MM-DD\";", "uncentered observation data for the test period [n_yrs, n_seg, len_seq,", "\"\"\" train = sel_partition_data(dataset, train_start_date, train_end_date) val = 
sel_partition_data(dataset, val_start_date,", "nseg, seq_len, nfeat] batched = split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape", "data # after [nbatch, nseg, seq_len, nfeat] batched = split_into_batches(arr,", "\"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan, offset=1,", "will be 182-547) :return: [numpy array] batched and reshaped dataset", "dims :return: scaled data with original dims \"\"\" if not", "= scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _, _ = scale(x_tst, std=x_std,", "\"\"\" scale the data so it has a standard deviation", "_, _ = scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _, _ =", "the pretrain data to make sure we have the same", "len_seq, nfeat/nout] \"\"\" n_batch, n_seg, seq_len, n_feat = data.shape return", "x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length = 365, offset = 1,", "= coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return", "= exclude_grp.get(\"start_date\") if start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end =", "= np.where(np.isinf(adj), 0, adj) adj = -adj mean_adj = np.mean(adj[adj", "our inputs to match theirs. 
df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x =", "offset=1): \"\"\" split training data into batches with size of", "argparse import numpy as np import os import pandas as", "y_std, y_mean = scale(y_obs_trn) data = { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset,", "coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\":", "\"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),", ":param pretrain_file: [str] the file with the pretraining data (SNTemp", "type=int, default=1, help=\"Y pred start\", ) parser.add_argument(\"--dow\", action='store_true',) args =", "range(int(1 / offset)): start = int(i * offset * seq_len)", "(can have multiple discontinuos periods) :param train_end_date: [str or list]", "since they will be smaller combined.extend([s for s in split", "is the order that the split into batches expects arr", "group from the exclude yml file :return: [tuple of datetime", "): \"\"\" prepare input and output data for DL model", "output data for entire period of record of SNTemp [n_samples,", "into batches with size of batch_size :param data_array: [numpy array]", "list] fmt: \"YYYY-MM-DD\"; date(s) to end test period (can have", "np.mean(adj[adj != 0]) std_adj = np.std(adj[adj != 0]) adj[adj !=", "batch since they will be smaller combined.extend([s for s in", "datetime objects] start date, end date \"\"\" start = exclude_grp.get(\"start_date\")", "mean of zero :param dataset: [xr dataset] input or output", "**data) return data def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process adj", "to have a std of 1 and a mean of", "end test period (can have multiple discontinuos periods) :param x_vars:", "= 12 y_start = 1 LAtrain = np.load('data/METR-LA/train.npz') LAtest =", "exclude 
yml file :return: [tuple of datetime objects] start date,", ":return: [tuple of datetime objects] start date, end date \"\"\"", "= 1, period = None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None,", "test period (can have multiple discontinuos periods) :param test_end_date: [str", "x_std, x_mean = scale(x_data) x_trn_scl, _, _ = scale(x_trn, std=x_std,", "in range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\")", "written :returns: training and testing data along with the means", "y_mean = scale(y_obs_trn) data = { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),", "of 1 and a mean of zero :param obs_temper_file: [str]", "I need one variable name. It can be any in", "pretrain_file: [str] the file with the pretraining data (SNTemp data)", "the training input and output data 'y_trn_pre': batched, scaled, and", "end dates. This assumes your training data is in one", "train period (can have multiple discontinuos periods) :param val_start_date: [str", "in obs_files: ds = xr.open_zarr(filename) obs.append(ds) if \"site_id\" in ds.variables:", "def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process adj matrix. 
**The resulting", "observations file (csv) :param pretrain_file: [str] the file with the", "data) :param distfile: [str] path to the distance matrix .npz", "adj) adj = -adj mean_adj = np.mean(adj[adj != 0]) std_adj", "0] = adj[adj != 0] / std_adj adj[adj != 0]", "seq_len, n_out] 'y_obs_trn': batched, scaled, and centered output observation data", "[adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm, f,", "\"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl,", "return train, val, test def split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split", "parser.parse_args() if os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists. Do you want", "len_seq, 2] \"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] #", "\"YYYY-MM-DD\"; date(s) to start period (can have multiple discontinuos periods)", "and national seg ids for testing data [n_yrs, n_seg, len_seq,", "of batch_size :param data_array: [numpy array] array of training data", "i return row_col_names, sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')", "file of pre_training data :return: [xr dataset] the observations in", "date ranges :param dataset: [xr dataset] input or output data", "seq_len]) combined = np.asarray(combined) return combined def read_multiple_obs(obs_files, x_data): \"\"\"", "[x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in obs_files: ds = xr.open_zarr(filename) obs.append(ds)", "False, period = np.nan): \"\"\" convert xarray dataset into numpy", "xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates and end_dates must have same", "test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", 
\"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\",", "df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0) df =", "seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset,", "observation files :param pre_train_file: [str] the file of pre_training data", "train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) if normalize_y: # scale", "check_if_finite(std) check_if_finite(mean) return scaled, std, mean def sel_partition_data(dataset, start_dates, end_dates):", "dims: [nbatch, nseg, len_seq, nfeat/nout] :return: reshaped data [nbatch *", "from the exclude yml file :return: [tuple of datetime objects]", "dataset[new_var_name] = coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset )", "\"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val,", "scaled, and centered output observation data for the training period", "False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'],", "file (csv) :param obs_flow_file:[str] discharge observations file (csv) :param pretrain_file:", "import absolute_import from __future__ import division from __future__ import print_function", "period (can have multiple discontinuos periods) :param x_vars: [list] variables", "dataset: [xr dataset] data to be batched :param seq_len: [int]", "sel_partition_data(dataset, val_start_date, val_end_date) test = sel_partition_data(dataset, test_start_date, test_end_date) return train,", "isinstance(mean, xr.Dataset): 
std = dataset.std(skipna=True) mean = dataset.mean(skipna=True) # adding", "offset)): start = int(i * offset * seq_len) idx =", "inputs to match theirs. df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12", "\"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn,", "std_adj = np.std(adj[adj != 0]) adj[adj != 0] = adj[adj", "__future__ import print_function from __future__ import unicode_literals import argparse import", "isinstance(end_dates, str): return dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates is str", "the # first first_var = next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0]", "and a mean of zero :param dataset: [xr dataset] input", "ids for training data [n_samples, seq_len, 2] 'dates_ids_tst: un-batched dates", "seq_length_x = 12 seq_length_y = 12 y_start = 1 LAtrain", "properties will not be included as predictors :param exclude_file: [str]", ":param x_vars: [list] variables that should be used as input.", "df = df.sort_index(axis=1) sensor_id_to_ind = {} for i, sensor_id in", "x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file,", "[n_out] 'y_obs_tst': un-batched, unscaled, uncentered observation data for the test", "if out_file: if os.path.isdir(out_file) == False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),", "_, _ = scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _, _ =", "SNTemp [n_samples, seq_len, n_out] 'y_obs_trn': batched, scaled, and centered output", "data along with the means and standard deviations of the", "[nbatch * nseg, len_seq, nfeat/nout] \"\"\" n_batch, n_seg, seq_len, n_feat", "\"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, 
y=True, period=period), \"y_train\":", "to end test period (can have multiple discontinuos periods) \"\"\"", "0]) adj[adj != 0] = adj[adj != 0] - mean_adj", "os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists. Do you want to overwrite", "type=str, default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",)", "out_file: if os.path.isdir(out_file) == False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'],", "= int(i * offset * seq_len) idx = np.arange(start=start, stop=data_array.shape[1]", "A_hat def sort_dist_matrix(mat, row_col_names): \"\"\" sort the distance matrix by", "multiple discontinuos periods) :param test_end_date: [str or list] fmt: \"YYYY-MM-DD\";", "periods) \"\"\" train = sel_partition_data(dataset, train_start_date, train_end_date) val = sel_partition_data(dataset,", "period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset,", "n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get the start and", "y & np.isfinite(period): reshaped = reshaped[:,-period:,...] return reshaped def coord_as_reshaped_array(dataset,", "be any in the dataset, but I'll use the #", "exclude group from the exclude yml file :return: [tuple of", "return np.reshape(data, [n_batch * n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\"", "test_start_date, test_end_date) return train, val, test def split_into_batches(data_array, seq_len=365, offset=1):", "to make sure we have the same indexing. 
:param obs_files:", "data for DL model training read in and process data", "of the variables will be used :param primary_variable: [str] which", "continuous block and all the dates that are not in", "argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw", "\"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length),", "\"%Y-%m-%d\") return start, end def convert_batch_reshape(dataset, seq_len=365, offset=1, y =", "of datetime objects] start date, end date \"\"\" start =", "# I need one variable name. It can be any", "numpy array, swap the axes, batch the array and reshape", "have multiple discontinuos periods) :param test_start_date: [str or list] fmt:", "\"\"\" adj_matrices = np.load(infile) adj = adj_matrices[dist_type] adj_full = sort_dist_matrix(adj,", "array, swap the axes, batch the array and reshape for", "tuple\") def separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date,", "np.where(np.isinf(adj), 0, adj) adj = -adj mean_adj = np.mean(adj[adj !=", "model training read in and process data into training and", "= dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values # if the dataset", "arr = dataset.to_array().values # if the dataset is empty, just", "12 seq_length_y = 12 y_start = 1 LAtrain = np.load('data/METR-LA/train.npz')", "[str] the path to the catchment properties file. If left", "data with date dimension :param start_dates: [str or list] fmt:", "data def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process adj matrix. 
**The", "dataset into numpy array, swap the axes, batch the array", "to end validation period (can have multiple discontinuos periods) :param", "= 1 LAtrain = np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval =", "not isinstance(mean, xr.Dataset): std = dataset.std(skipna=True) mean = dataset.mean(skipna=True) #", ":param dataset: [xr dataset] data to be batched :param seq_len:", "used as input. If None, all of the variables will", ") return obs def reshape_for_training(data): \"\"\" reshape the data for", "stop=data_array.shape[1] + 1, step=seq_len) split = np.split(data_array, indices_or_sections=idx, axis=1) #", "dataset[first_var])[0] new_var_name = coord_name + \"1\" dataset[new_var_name] = coord_array reshaped_np_arr", ") ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def prep_adj_matrix(infile, dist_type, out_file=None):", "[] for i in range(int(1 / offset)): start = int(i", "can be any in the dataset, but I'll use the", "offset: [float] 0-1, how to offset the batches (e.g., 0.5", "seq_len, nfeat] batched = split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape data", "as input. If None, all of the variables will be", "observations training data [n_out] 'y_trn_obs_mean': mean of the observation training", "process adj matrix. **The resulting matrix is sorted by seg_id_nat", "number in case there is a std of zero scaled", "data with dims [nbatches, nseg, seq_len (batch_size), nfeat] \"\"\" combined", "be smaller combined.extend([s for s in split if s.shape[1] ==", "pred start\", ) parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args() if os.path.exists(args.output_dir):", "and format multiple observation files. we read in the pretrain", "it has a standard deviation of 1 and a mean", "to overwrite it? 
(y/n)')).lower().strip() if reply[0] != 'y': exit else:", "dataset: [xr dataset] input or output data with date dimension", "deviations of the training input and output data 'y_trn_pre': batched,", "return dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates is str but not", "= df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind = {} for i,", "exists. Do you want to overwrite it? (y/n)')).lower().strip() if reply[0]", "discontinuos periods) \"\"\" train = sel_partition_data(dataset, train_start_date, train_end_date) val =", ":param out_file: :return: [numpy array] processed adjacency matrix \"\"\" adj_matrices", "predictors :param exclude_file: [str] path to exclude file :param log_q:", "separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) y_pre_trn,", "is if dataset.date.size == 0: return arr # before [nfeat,", "be written :returns: training and testing data along with the", "LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check = np.moveaxis(data['x_train'],", "test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst( y_pre, train_start_date, train_end_date,", "be 0-365 and the second will be 182-547) :return: [numpy", "val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\",", "need one variable name. 
It can be any in the", "\"\"\" prepare input and output data for DL model training", "fmt: \"YYYY-MM-DD\"; date(s) to start test period (can have multiple", "xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name + \"1\" dataset[new_var_name] = coord_array", "first and last batch since they will be smaller combined.extend([s", "np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat) if out_file: out_dm = [adj_full[0],", "scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)", "of zero :param dataset: [xr dataset] input or output data", "= np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape", "the axes, batch the array and reshape for training :param", "with the means and standard deviations of the training input", "y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst( y_obs,", "array] batched and reshaped dataset \"\"\" # convert xr.dataset to", "be included as predictors :param exclude_file: [str] path to exclude", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to end test period", "dataset] input or output data with date dimension :param start_dates:", "offset=offset, seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),", "end train period (can have multiple discontinuos periods) :param val_start_date:", "(can have multiple discontinuos periods) :return: dataset of just those", "= df.sort_index(axis=1) sensor_id_to_ind = {} for i, sensor_id in enumerate(df.columns):", "def prep_data( obs_temper_file, obs_flow_file, pretrain_file, #distfile, train_start_date, train_end_date, val_start_date, val_end_date,", "date(s) to end test period (can have multiple discontinuos periods)", "[numpy array] batched data with dims [nbatches, nseg, seq_len (batch_size),", "sure we have the same 
indexing. :param obs_files: [list] list", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to end train period", "adj = -adj mean_adj = np.mean(adj[adj != 0]) std_adj =", "\"site_id\" in ds.variables: del ds[\"site_id\"] obs = xr.merge(obs, join=\"left\") obs", "x_scl, x_std, x_mean = scale(x_data) x_trn_scl, _, _ = scale(x_trn,", ":param dataset: [xr dataset] input or output data with dims", "\"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars),", "of discharge in training :param out_file: [str] file to where", "date(s) to start train period (can have multiple discontinuos periods)", "\"\"\" reshape the data for training :param data: training data", "np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def", ":return: [xr dataset] the observations in the same time \"\"\"", "sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ ==", "out_file=None, #segs=None, normalize_y=False, ): \"\"\" prepare input and output data", "= prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\": check2 =", "\"YYYY-MM-DD\"; date(s) to start validation period (can have multiple discontinuos", "step=seq_len) split = np.split(data_array, indices_or_sections=idx, axis=1) # add all but", "1 and a mean of zero :param obs_temper_file: [str] temperature", "and end dates for the exclude group :param exclude_grp: [dict]", "xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] # make sure we don't have", "len(end_dates): data_list = [] for i in range(len(start_dates)): date_slice =", "as np import yaml import xarray as xr import datetime", "the second will be 182-547) :return: 
[numpy array] batched and", "to start period (can have multiple discontinuos periods) :param end_dates:", "[n_samples, seq_len, 2] 'dates_ids_tst: un-batched dates and national seg ids", "fmt: \"YYYY-MM-DD\"; date(s) to start period (can have multiple discontinuos", "os.path.isdir(out_file) == False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'),", "else: _, y_std, y_mean = scale(y_obs_trn) data = { \"x_train\":", "and last batch since they will be smaller combined.extend([s for", "train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start train", "will be smaller combined.extend([s for s in split if s.shape[1]", "reply[0] != 'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our", "dims :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "end_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end period", "and reshaped dataset \"\"\" # convert xr.dataset to numpy array", "if \"site_id\" in ds.variables: del ds[\"site_id\"] obs = xr.merge(obs, join=\"left\")", "the training period 'y_trn_obs_std': standard deviation of the y observations", "''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\"", "np.load(infile) adj = adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj =", "df = df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind = {} for", "objects] start date, end date \"\"\" start = exclude_grp.get(\"start_date\") if", "dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values # if the", "def split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split training data into batches", "to start test period (can have multiple discontinuos periods) :param", "np.isfinite(period): reshaped 
= reshaped[:,-period:,...] return reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365,", "exclude_grp.get(\"end_date\") if end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end", "seq_length_y = 12 y_start = 1 LAtrain = np.load('data/METR-LA/train.npz') LAtest", "multiple discontinuos periods) :return: dataset of just those dates \"\"\"", "\"\"\" n_batch, n_seg, seq_len, n_feat = data.shape return np.reshape(data, [n_batch", "your training data is in one continuous block and all", "first batch will be 0-365 and the second will be", "obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'],", "list) or isinstance(start_dates, tuple): if len(start_dates) == len(end_dates): data_list =", ":param val_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end", "data so it has a standard deviation of 1 and", "offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\",", "np.exp(-adj[adj != 0])) I = np.eye(adj.shape[0]) A_hat = adj.copy() +", "{ \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\":", "fmt: \"YYYY-MM-DD\"; date(s) to end test period (can have multiple", "= None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False, ):", "and process data into training and testing datasets. 
the training", "the mean and std y_obs_trn, y_std, y_mean = scale(y_obs_trn) y_pre_trn,", "if isinstance(end_dates, str): return dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates is", "input values check_if_finite(x_data) x_trn, x_val, x_tst = separate_trn_tst( x_data, train_start_date,", "xarray dataset into numpy array, swap the axes, batch the", "Reformat our inputs to match theirs. df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x", "discharge in training :param out_file: [str] file to where the", "0, adj) adj = -adj mean_adj = np.mean(adj[adj != 0])", "data [n_yrs, n_seg, len_seq, 2] \"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data", "will be used :param primary_variable: [str] which variable the model", "the dataset, but I'll use the # first first_var =", "data from the test data according to the start and", "original dims \"\"\" if not isinstance(std, xr.Dataset) or not isinstance(mean,", "in the pretrain data to make sure we have the", "[n_out] 'y_trn_obs_mean': mean of the observation training data [n_out] 'y_obs_tst':", "period = None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False,", "by seg_id_nat :return: \"\"\" df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df", "\"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\",", "the start and end dates for the exclude group :param", "std=x_std, mean=x_mean) x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean) x_tst_scl,", "\"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length),", "start = int(i * offset * seq_len) idx = np.arange(start=start,", "LAtest['x'].shape LAtest['y'].shape check = 
np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'],", "/ (std + 1e-10) check_if_finite(std) check_if_finite(mean) return scaled, std, mean", "test data according to the start and end dates. This", "np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_pre_val'], ) '''", "val_end_date, test_start_date, test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst( y_pre,", "of training data with dims [nseg, ndates, nfeat] :param seq_len:", ":return: [numpy array] processed adjacency matrix \"\"\" adj_matrices = np.load(infile)", "os import pandas as pd import util import os.path import", "!= 0]) adj[adj != 0] = adj[adj != 0] -", "period=np.nan, offset=1, out_file = 'data/DRB_gwn_full') '''f __name__ == \"__main__\": parser", "reshape_for_training(data): \"\"\" reshape the data for training :param data: training", "'2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'],", "= obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return obs def", "the training and testing data are scaled to have a", "of the variables. :param catch_prop_file: [str] the path to the", "else: raise ValueError(\"start_dates is str but not end_date\") # if", "= sel_partition_data(dataset, train_start_date, train_end_date) val = sel_partition_data(dataset, val_start_date, val_end_date) test", "\"\"\" # if it just one date range if isinstance(start_dates,", "name. 
It can be any in the dataset, but I'll", "file :return: [tuple of datetime objects] start date, end date", "the data so it has a standard deviation of 1", "and centered output data for entire period of record of", "i in range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list,", "will be 0-365 and the second will be 182-547) :return:", "if len(start_dates) == len(end_dates): data_list = [] for i in", "(e.g., 0.5 means that the first batch will be 0-365", "'2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\",", "with dims [nbatches, nseg, seq_len (batch_size), nfeat] \"\"\" combined =", "the log of discharge in training :param out_file: [str] file", "check_if_finite(mean) return scaled, std, mean def sel_partition_data(dataset, start_dates, end_dates): \"\"\"", "the train data from the test data according to the", "/ (1 + np.exp(-adj[adj != 0])) I = np.eye(adj.shape[0]) A_hat", "nfeat] batched = split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape data #", "[xr dataset] input or output data :param std: [xr dataset]", "convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period = np.nan): \"\"\"", "and testing data along with the means and standard deviations", "data with dims :return: scaled data with original dims \"\"\"", "training data [n_samples, seq_len, 2] 'dates_ids_tst: un-batched dates and national", "coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name + \"1\" dataset[new_var_name]", "date(s) to start period (can have multiple discontinuos periods) :param", "y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst,", 
"the testing. :param dataset: [xr dataset] input or output data", ":return: dataset of just those dates \"\"\" # if it", "\"updown\") :param out_file: :return: [numpy array] processed adjacency matrix \"\"\"", "os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], )", "pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind", "test_end_date, ): \"\"\" separate the train data from the test", "the exclude group from the exclude yml file :return: [tuple", "output data with dims :param train_start_date: [str or list] fmt:", "swap the axes, batch the array and reshape for training", "new_var_name = coord_name + \"1\" dataset[new_var_name] = coord_array reshaped_np_arr =", "datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if end: end = datetime.datetime.strptime(end,", "== \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output directory.\")", "dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values # if the dataset is", "-1) # batch the data # after [nbatch, nseg, seq_len,", "any in the dataset, but I'll use the # first", "list of filenames of observation files :param pre_train_file: [str] the", "catchment properties will not be included as predictors :param exclude_file:", "sensor_id_to_ind[sensor_id] = i return row_col_names, sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz',", "else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our inputs to match theirs.", "split if s.shape[1] == seq_len]) combined = np.asarray(combined) return combined", "\"\"\" split training data into batches with size of batch_size", 
"convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True,", "reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it?", "for the test period [n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn: batched", "[tuple of datetime objects] start date, end date \"\"\" start", "0-365 and the second will be 182-547) :return: [numpy array]", "_ = scale(y_pre_trn, y_std, y_mean) else: _, y_std, y_mean =", "LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),", "theirs. df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y = 12", "to offset the batches (e.g., 0.5 means that the first", "first_var = next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name", "and national seg ids for training data [n_samples, seq_len, 2]", "has a standard deviation of 1 and a mean of", "time \"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in obs_files:", "adj[adj != 0] - mean_adj adj[adj != 0] = adj[adj", "dataset \"\"\" # convert xr.dataset to numpy array dataset =", "!= 0] = adj[adj != 0] / std_adj adj[adj !=", "\"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length,", "import util import os.path import pandas as pd import numpy", "means that the first batch will be 0-365 and the", "ndates, nfeat] # this is the order that the split", "= ds_pre[x_vars] # make sure we don't have any weird", "/ std_adj adj[adj != 0] = 1 / (1 +", "matrix by seg_id_nat :return: \"\"\" df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)", "return obs def reshape_for_training(data): \"\"\" reshape the data 
for training", "[numpy array] array of training data with dims [nseg, ndates,", "and get the mean and std y_obs_trn, y_std, y_mean =", "convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True,", "add all but the first and last batch since they", "dataset: [xr dataset] input or output data with dims :param", "the observation training data [n_out] 'y_obs_tst': un-batched, unscaled, uncentered observation", "mean and std y_obs_trn, y_std, y_mean = scale(y_obs_trn) y_pre_trn, _,", "primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False, ): \"\"\" prepare", "be 182-547) :return: [numpy array] batched data with dims [nbatches,", "y_start = 1 LAtrain = np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval", "multiple discontinuos periods) \"\"\" train = sel_partition_data(dataset, train_start_date, train_end_date) val", "separate the train data from the test data according to", "date \"\"\" start = exclude_grp.get(\"start_date\") if start: start = datetime.datetime.strptime(start,", "offset the batches (e.g., 0.5 means that the first batch", "range if isinstance(start_dates, str): if isinstance(end_dates, str): return dataset.sel(date=slice(start_dates, end_dates))", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to start period (can", "= { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),", ":param pre_train_file: [str] the file of pre_training data :return: [xr", "std, mean def sel_partition_data(dataset, start_dates, end_dates): \"\"\" select the data", "where the values will be written :returns: training and testing", "arr = np.moveaxis(arr, 0, -1) # batch the data #", "std: [xr dataset] standard deviation if scaling test data 
with", "data_array: [numpy array] array of training data with dims [nseg,", "_, y_std, y_mean = scale(y_obs_trn) data = { \"x_train\": convert_batch_reshape(x_trn_scl,", "y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date, val_end_date,", ") if normalize_y: # scale y training data and get", "val_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end validation", "by seg_id_nat ** :param infile: :param dist_type: [str] type of", "which variable the model should focus on 'temp' or 'flow'.", "empty, just return it as is if dataset.date.size == 0:", "with dims :param mean: [xr dataset] mean if scaling test", "row_col_names): \"\"\" sort the distance matrix by seg_id_nat :return: \"\"\"", "so it has a standard deviation of 1 and a", "or output data :param std: [xr dataset] standard deviation if", "test_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start test", "This determines the order of the variables. :param catch_prop_file: [str]", "\"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file =", "separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) if", "data to make sure we have the same indexing. 
:param", "y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst( y_obs, train_start_date,", "pickle.dump(out_dm, f, protocol=2) return adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names):", "= np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y & np.isfinite(period): reshaped =", "end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates and end_dates", "or list] fmt: \"YYYY-MM-DD\"; date(s) to end period (can have", "block and all the dates that are not in the", "val_start_date, val_end_date, test_start_date, test_end_date, ) if normalize_y: # scale y", "!= 0] - mean_adj adj[adj != 0] = adj[adj !=", "of the training input and output data 'y_trn_pre': batched, scaled,", "and testing datasets. the training and testing data are scaled", "if start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if", "[str] the file of pre_training data :return: [xr dataset] the", "val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length =", "adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names): \"\"\" sort the distance", "if scaling test data with dims :return: scaled data with", "the same time \"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename", "scaled, and centered output data for entire period of record", "after [nseg, ndates, nfeat] # this is the order that", "seq_len, nseg, nfeat] #reshaped = reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3],", "np.reshape(data, [n_batch * n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get", "multiple discontinuos periods) :param train_end_date: [str or list] fmt: \"YYYY-MM-DD\";", "a std of 1 and a mean of zero :param", "train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], 
val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'],", "the first batch will be 0-365 and the second will", "pandas as pd import numpy as np import yaml import", "the same indexing. :param obs_files: [list] list of filenames of", "assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file, pretrain_file, #distfile, train_start_date, train_end_date,", "the catchment properties will not be included as predictors :param", "nfeat/nout] :return: reshaped data [nbatch * nseg, len_seq, nfeat/nout] \"\"\"", ":param start_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start", "len_seq, nfeat/nout] :return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]", "the file with the pretraining data (SNTemp data) :param distfile:", "read_multiple_obs(obs_files, x_data): \"\"\" read and format multiple observation files. we", "path to exclude file :param log_q: [bool] whether or not", "take the log of discharge in training :param out_file: [str]", "\"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in obs_files: ds", "-adj mean_adj = np.mean(adj[adj != 0]) std_adj = np.std(adj[adj !=", "date, end date \"\"\" start = exclude_grp.get(\"start_date\") if start: start", "seq_len=seq_len, offset=offset ) return reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to end validation period", "= np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check", "order of the variables. 
:param catch_prop_file: [str] the path to", "\"\"\" df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0) df", "pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y = 12 y_start = 1", "start_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start period", "exclude file :param log_q: [bool] whether or not to take", "numpy array dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values #", "scaling test data with dims :return: scaled data with original", "with dims :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "periods) :return: dataset of just those dates \"\"\" # if", "seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset,", "will be written :returns: training and testing data along with", "we read in the pretrain data to make sure we", ":param data_array: [numpy array] array of training data with dims", "y_obs_trn, y_std, y_mean = scale(y_obs_trn) y_pre_trn, _, _ = scale(y_pre_trn,", "start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if end:", "should focus on 'temp' or 'flow'. 
This determines the order", "coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): # I need one variable name.", "data_list = [] for i in range(len(start_dates)): date_slice = slice(start_dates[i],", "return reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file,", "end dates for the exclude group :param exclude_grp: [dict] dictionary", "obs_temper_file: [str] temperature observations file (csv) :param obs_flow_file:[str] discharge observations", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to start train period", "deviation of the y observations training data [n_out] 'y_trn_obs_mean': mean", "catch_prop_file: [str] the path to the catchment properties file. If", "un-batched dates and national seg ids for testing data [n_yrs,", "of zero :param obs_temper_file: [str] temperature observations file (csv) :param", "it's a list of date ranges elif isinstance(start_dates, list) or", "x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _, _", "__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output", "= read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs = y_obs[y_vars] y_pre = ds_pre[y_vars]", "directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12,", "convert xarray dataset into numpy array, swap the axes, batch", "fmt: \"YYYY-MM-DD\"; date(s) to end validation period (can have multiple", "included as predictors :param exclude_file: [str] path to exclude file", "pre_train_file: [str] the file of pre_training data :return: [xr dataset]", "dims [nseg, ndates, nfeat] :param seq_len: [int] length of sequences", "I = np.eye(adj.shape[0]) A_hat = adj.copy() + I D =", "prep_data( obs_temper_file, obs_flow_file, pretrain_file, 
#distfile, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date,", "with open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm, f, protocol=2) return adj_full[0],", "import division from __future__ import print_function from __future__ import unicode_literals", "'2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\",", "scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)", "str but not end_date\") # if it's a list of", "training period 'y_trn_obs_std': standard deviation of the y observations training", "in one continuous block and all the dates that are", "This assumes your training data is in one continuous block", "[0,1,2,3], [0,2,1,3]) if y & np.isfinite(period): reshaped = reshaped[:,-period:,...] return", "ValueError(\"start_dates must be either str, list, or tuple\") def separate_trn_tst(", "batch_size :param data_array: [numpy array] array of training data with", "date range or a set of date ranges :param dataset:", "zero :param dataset: [xr dataset] input or output data :param", "\"\"\" # convert xr.dataset to numpy array dataset = dataset.transpose(\"seg_id_nat\",", "validation period (can have multiple discontinuos periods) :param val_end_date: [str", "obs_files: ds = xr.open_zarr(filename) obs.append(ds) if \"site_id\" in ds.variables: del", "testing datasets. 
the training and testing data are scaled to", "np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check =", "the distance matrix .npz file :param train_start_date: [str or list]", "offset=1, out_file = 'data/DRB_gwn_full') '''f __name__ == \"__main__\": parser =", "offset=1, y = False, period = np.nan): \"\"\" convert xarray", "0.5 means that the first batch will be 0-365 and", "(\"upstream\", \"downstream\" or \"updown\") :param out_file: :return: [numpy array] processed", "y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val,", "for testing data [n_yrs, n_seg, len_seq, 2] \"\"\" ds_pre =", "np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test':", "exclude_grp.get(\"start_date\") if start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\")", "# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat] #", "open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm, f, protocol=2) return adj_full[0], adj_full[1],", "training data into batches with size of batch_size :param data_array:", "(can have multiple discontinuos periods) :param end_dates: [str or list]", "test_end_date, ) x_scl, x_std, x_mean = scale(x_data) x_trn_scl, _, _", "as f: pickle.dump(out_dm, f, protocol=2) return adj_full[0], adj_full[1], A_hat def", "data = { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset,", "182-547) :return: [numpy array] batched data with dims [nbatches, nseg,", "args = parser.parse_args() if os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists. 
Do", "[bool] whether or not to take the log of discharge", "+ 1, step=seq_len) split = np.split(data_array, indices_or_sections=idx, axis=1) # add", "obs = obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\":", "obs.append(ds) if \"site_id\" in ds.variables: del ds[\"site_id\"] obs = xr.merge(obs,", "date range if isinstance(start_dates, str): if isinstance(end_dates, str): return dataset.sel(date=slice(start_dates,", "or list] fmt: \"YYYY-MM-DD\"; date(s) to start period (can have", "dates that are not in the training are in the", "len_seq, n_out] 'dates_ids_trn: batched dates and national seg ids for", "data are scaled to have a std of 1 and", "x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'],", "val_end_date, test_start_date, test_end_date, ): \"\"\" separate the train data from", "np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])", "reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return reshaped_np_arr def", "test data with dims :param mean: [xr dataset] mean if", "make sure we have the same indexing. 
:param obs_files: [list]", "or list] fmt: \"YYYY-MM-DD\"; date(s) to end validation period (can", "(can have multiple discontinuos periods) :param test_end_date: [str or list]", "record of SNTemp [n_samples, seq_len, n_out] 'y_obs_trn': batched, scaled, and", "offset=offset ) return reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data(", "n_feat = data.shape return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])", "of observation files :param pre_train_file: [str] the file of pre_training", "or tuple\") def separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date,", "scale(x_tst, std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs =", "dist_type, out_file=None): \"\"\" process adj matrix. **The resulting matrix is", "one continuous block and all the dates that are not", "i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return row_col_names, sensor_id_to_ind,", "= scale(x_data) x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean) x_val_scl,", "isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset): std = dataset.std(skipna=True) mean", "xr.open_zarr(filename) obs.append(ds) if \"site_id\" in ds.variables: del ds[\"site_id\"] obs =", "x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data)", "else: raise ValueError(\"start_dates must be either str, list, or tuple\")", "the path to the catchment properties file. 
If left unfilled,", ") np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], )", ":param out_file: [str] file to where the values will be", "= ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst( y_obs, train_start_date, train_end_date,", "# batch the data # after [nbatch, nseg, seq_len, nfeat]", "[0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], )", "#exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False, ): \"\"\" prepare input and", "'y_trn_obs_std': standard deviation of the y observations training data [n_out]", "elif isinstance(start_dates, list) or isinstance(start_dates, tuple): if len(start_dates) == len(end_dates):", "raise ValueError(\"start_dates must be either str, list, or tuple\") def", "np.sum(A_hat, axis=1) D_inv = D ** -1.0 D_inv = np.diag(D_inv)", "distance matrix (\"upstream\", \"downstream\" or \"updown\") :param out_file: :return: [numpy", ":param std: [xr dataset] standard deviation if scaling test data", "one date range if isinstance(start_dates, str): if isinstance(end_dates, str): return", "if it just one date range if isinstance(start_dates, str): if", "in range(int(1 / offset)): start = int(i * offset *", "training data is in one continuous block and all the", "read in the pretrain data to make sure we have", "def read_multiple_obs(obs_files, x_data): \"\"\" read and format multiple observation files.", "training data with dims [nseg, ndates, nfeat] :param seq_len: [int]", "multiple discontinuos periods) :param val_start_date: [str or list] fmt: \"YYYY-MM-DD\";", "the variables. 
:param catch_prop_file: [str] the path to the catchment", "the split into batches expects arr = np.moveaxis(arr, 0, -1)", "training are in the testing. :param dataset: [xr dataset] input", "I D = np.sum(A_hat, axis=1) D_inv = D ** -1.0", "(csv) :param obs_flow_file:[str] discharge observations file (csv) :param pretrain_file: [str]", "a date range or a set of date ranges :param", "test_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end test", "discontinuos periods) :param val_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "reshape the data for training :param data: training data (either", "y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file = 'data/DRB_gwn_full') '''f __name__", "dataset] standard deviation if scaling test data with dims :param", ":return: [numpy array] batched and reshaped dataset \"\"\" # convert", "[\"seg_tave_water\", \"seg_outflow\"], seq_length = 365, offset = 1, period =", "pickle def scale(dataset, std=None, mean=None): \"\"\" scale the data so", ") parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args() if os.path.exists(args.output_dir): reply =", "offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn,", "seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset,", "a std of zero scaled = (dataset - mean) /", "- mean_adj adj[adj != 0] = adj[adj != 0] /", "log of discharge in training :param out_file: [str] file to", "as pd import util import os.path import pandas as pd", "end_dates)) else: raise ValueError(\"start_dates is str but not end_date\") #", "will not be included as predictors :param exclude_file: [str] path", "date(s) to end period (can have multiple discontinuos periods) :return:", 
"dataset.std(skipna=True) mean = dataset.mean(skipna=True) # adding small number in case", "= slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates", "np.split(data_array, indices_or_sections=idx, axis=1) # add all but the first and", "reshaped data [nbatch * nseg, len_seq, nfeat/nout] \"\"\" n_batch, n_seg,", "the dataset is empty, just return it as is if", "val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\",", "scale(y_obs_trn) data = { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl,", "with date dimension :param start_dates: [str or list] fmt: \"YYYY-MM-DD\";", "data according to the start and end dates. This assumes", "np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'),", "batch the data # after [nbatch, nseg, seq_len, nfeat] batched", "in and process data into training and testing datasets. 
the", "matrix is sorted by seg_id_nat ** :param infile: :param dist_type:", "discontinuos periods) :param x_vars: [list] variables that should be used", "A_hat = np.matmul(D_inv, A_hat) if out_file: out_dm = [adj_full[0], adj_full[1],", "dataset is empty, just return it as is if dataset.date.size", "test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"],", "data (either x or y or mask) dims: [nbatch, nseg,", "temperature observations file (csv) :param obs_flow_file:[str] discharge observations file (csv)", "\"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True,", "val_end_date) test = sel_partition_data(dataset, test_start_date, test_end_date) return train, val, test", "to where the values will be written :returns: training and", "of the observation training data [n_out] 'y_obs_tst': un-batched, unscaled, uncentered", "end_dates): \"\"\" select the data from a date range or", "ValueError(\"start_dates is str but not end_date\") # if it's a", "dim=\"date\") else: raise ValueError(\"start_dates and end_dates must have same length\")", "adj.copy() + I D = np.sum(A_hat, axis=1) D_inv = D", "focus on 'temp' or 'flow'. This determines the order of", "'''f __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\",", "# after [nbatch, nseg, seq_len, nfeat] batched = split_into_batches(arr, seq_len=seq_len,", "be used as input. 
If None, all of the variables", "y_mean = scale(y_obs_trn) y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)", "\"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset,", "\"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length,", "!= 0]) std_adj = np.std(adj[adj != 0]) adj[adj != 0]", "'wb') as f: pickle.dump(out_dm, f, protocol=2) return adj_full[0], adj_full[1], A_hat", "and all the dates that are not in the training", "same indexing. :param obs_files: [list] list of filenames of observation", "DL model training read in and process data into training", "for entire period of record of SNTemp [n_samples, seq_len, n_out]", "and the second will be 182-547) :return: [numpy array] batched", "input or output data with date dimension :param start_dates: [str", "is str but not end_date\") # if it's a list", "data to be batched :param seq_len: [int] length of sequences", "discharge observations file (csv) :param pretrain_file: [str] the file with", "normalize_y: # scale y training data and get the mean", "check_if_finite(x_data) x_trn, x_val, x_tst = separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date,", "+ 1e-10) check_if_finite(std) check_if_finite(mean) return scaled, std, mean def sel_partition_data(dataset,", "str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip() if", "the catchment properties file. If left unfilled, the catchment properties", "centered output data for entire period of record of SNTemp", "data into training and testing datasets. 
the training and testing", "seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\":", "} if out_file: if os.path.isdir(out_file) == False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file,", "sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj = np.where(np.isinf(adj), 0, adj)", "observation data for the training period 'y_trn_obs_std': standard deviation of", "np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process", ":param dataset: [xr dataset] input or output data :param std:", "file. If left unfilled, the catchment properties will not be", "seq_len (batch_size), nfeat] \"\"\" combined = [] for i in", "data [n_out] 'y_obs_tst': un-batched, unscaled, uncentered observation data for the", "as xr import datetime import pickle def scale(dataset, std=None, mean=None):", "matrix (\"upstream\", \"downstream\" or \"updown\") :param out_file: :return: [numpy array]", "output data with date dimension :param start_dates: [str or list]", "with dims [nseg, ndates, nfeat] :param seq_len: [int] length of", "processed adjacency matrix \"\"\" adj_matrices = np.load(infile) adj = adj_matrices[dist_type]", "but the first and last batch since they will be", "\"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length),", "[str] the file with the pretraining data (SNTemp data) :param", "first first_var = next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name =", "enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return row_col_names, sensor_id_to_ind, df #check =", "dataset] input or output data with dims :param train_start_date: [str", "= 
convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return reshaped_np_arr def check_if_finite(xarr):", "#log_q=False, out_file=None, #segs=None, normalize_y=False, ): \"\"\" prepare input and output", "y training data and get the mean and std y_obs_trn,", "reshaped[:,-period:,...] return reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): # I", ") return reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file,", "range or a set of date ranges :param dataset: [xr", "all of the variables will be used :param primary_variable: [str]", "scale the data so it has a standard deviation of", "period (can have multiple discontinuos periods) :param val_start_date: [str or", "= {} for i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i", "not end_date\") # if it's a list of date ranges", "train, val, test def split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split training", "0]) std_adj = np.std(adj[adj != 0]) adj[adj != 0] =", ":return: [numpy array] batched data with dims [nbatches, nseg, seq_len", "y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'],", "period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars), 'period':", "multiple discontinuos periods) :param val_end_date: [str or list] fmt: \"YYYY-MM-DD\";", "multiple discontinuos periods) :param x_vars: [list] variables that should be", "if s.shape[1] == seq_len]) combined = np.asarray(combined) return combined def", "into batches expects arr = np.moveaxis(arr, 0, -1) # batch", "all but the first and last batch since they will", "xarray as xr import datetime import pickle def scale(dataset, std=None,", "s.shape[1] == 
seq_len]) combined = np.asarray(combined) return combined def read_multiple_obs(obs_files,", "np import os import pandas as pd import util import", "[nbatches, nseg, seq_len (batch_size), nfeat] \"\"\" combined = [] for", "as predictors :param exclude_file: [str] path to exclude file :param", "= xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] # make sure we don't", "0] = adj[adj != 0] - mean_adj adj[adj != 0]", "and testing data are scaled to have a std of", "of date ranges :param dataset: [xr dataset] input or output", "ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date,", ":param val_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start", "help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int,", "return combined def read_multiple_obs(obs_files, x_data): \"\"\" read and format multiple", ":param obs_flow_file:[str] discharge observations file (csv) :param pretrain_file: [str] the", "# convert xr.dataset to numpy array dataset = dataset.transpose(\"seg_id_nat\", \"date\")", "end = exclude_grp.get(\"end_date\") if end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return", "adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj = np.where(np.isinf(adj), 0, adj) adj", "start date, end date \"\"\" start = exclude_grp.get(\"start_date\") if start:", "'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length,", "= datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end def convert_batch_reshape(dataset, seq_len=365, offset=1,", "split into batches expects arr = np.moveaxis(arr, 0, -1) #", "print_function from __future__ import unicode_literals import argparse import numpy as", "not in the training 
are in the testing. :param dataset:", "seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset,", "seq_len, 2] 'dates_ids_tst: un-batched dates and national seg ids for", "seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\":", "util import os.path import pandas as pd import numpy as", "0] - mean_adj adj[adj != 0] = adj[adj != 0]", "nseg, ndates]; after [nseg, ndates, nfeat] # this is the", "data (SNTemp data) :param distfile: [str] path to the distance", "= np.moveaxis(arr, 0, -1) # batch the data # after", "offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if", "[0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file,", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to end period (can", "to numpy array dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values", "testing data along with the means and standard deviations of", "un-batched, unscaled, uncentered observation data for the test period [n_yrs,", "with size of batch_size :param data_array: [numpy array] array of", "train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) x_scl, x_std, x_mean =", "left unfilled, the catchment properties will not be included as", "y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], )", "y_mean.to_array().values, } if out_file: if 
os.path.isdir(out_file) == False: os.makedirs(out_file) '''", "x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'],", "offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),", "def separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ):", "A_hat] with open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm, f, protocol=2) return", "to end train period (can have multiple discontinuos periods) :param", "std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs = y_obs[y_vars]", ":param exclude_grp: [dict] dictionary representing the exclude group from the", "\"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365,", "observation training data [n_out] 'y_obs_tst': un-batched, unscaled, uncentered observation data", "== 0: return arr # before [nfeat, nseg, ndates]; after", "or list] fmt: \"YYYY-MM-DD\"; date(s) to start train period (can", "idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len) split = np.split(data_array,", "want to overwrite it? 
(y/n)')).lower().strip() if reply[0] != 'y': exit", "= scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _, _ = scale(x_val, std=x_std,", "seq_length = 365, offset = 1, period = None, primary_variable=\"temp\",", "#distfile, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None, y_vars= [\"seg_tave_water\",", "offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values,", "index=row_col_names) df = df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind = {}", "batched, scaled, and centered output data for entire period of", "list] fmt: \"YYYY-MM-DD\"; date(s) to end train period (can have", "reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file, pretrain_file,", "'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'),", "or list] fmt: \"YYYY-MM-DD\"; date(s) to end test period (can", "for i in range(int(1 / offset)): start = int(i *", "\"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val,", "1, period = None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None,", "it? 
(y/n)')).lower().strip() if reply[0] != 'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args)", "length of sequences (i.e., 365) :param offset: [float] 0-1, how", "val_end_date, test_start_date, test_end_date, ) if normalize_y: # scale y training", "= sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj = np.where(np.isinf(adj), 0,", "data with dims :param mean: [xr dataset] mean if scaling", ":param end_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end", "<reponame>SimonTopp/Graph-WaveNet from __future__ import absolute_import from __future__ import division from", "array dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr = dataset.to_array().values # if", "train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length", "= dataset.mean(skipna=True) # adding small number in case there is", "before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat] # this", "of date ranges elif isinstance(start_dates, list) or isinstance(start_dates, tuple): if", "0-1, how to offset the batches (e.g., 0.5 means that", "x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn,", "reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): # I need one", "= datetime.datetime.strptime(start, \"%Y-%m-%d\") end = exclude_grp.get(\"end_date\") if end: end =", "observation files. 
we read in the pretrain data to make", "numpy as np import yaml import xarray as xr import", "for training data [n_samples, seq_len, 2] 'dates_ids_tst: un-batched dates and", "slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates and", "period (can have multiple discontinuos periods) :param val_end_date: [str or", "* n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get the start", "files :param pre_train_file: [str] the file of pre_training data :return:", ":return: scaled data with original dims \"\"\" if not isinstance(std,", "y or mask) dims: [nbatch, nseg, len_seq, nfeat/nout] :return: reshaped", "xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename( {\"temp_c\":", ":param dist_type: [str] type of distance matrix (\"upstream\", \"downstream\" or", "convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, }", "#segs=None, normalize_y=False, ): \"\"\" prepare input and output data for", "= np.mean(adj[adj != 0]) std_adj = np.std(adj[adj != 0]) adj[adj", "prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01',", "discontinuos periods) :param train_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "how to offset the batches (e.g., 0.5 means that the", "pre_training data :return: [xr dataset] the observations in the same", "ranges elif isinstance(start_dates, list) or isinstance(start_dates, tuple): if len(start_dates) ==", "x_data = ds_pre[x_vars] # make sure we don't 
have any", "period of record of SNTemp [n_samples, seq_len, n_out] 'y_obs_trn': batched,", "= split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape data # after [nseq,", "variable the model should focus on 'temp' or 'flow'. This", "to end test period (can have multiple discontinuos periods) :param", "axes, batch the array and reshape for training :param dataset:", "help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1,", "A_hat) if out_file: out_dm = [adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl',", "\"YYYY-MM-DD\"; date(s) to end validation period (can have multiple discontinuos", "adj[adj != 0] = adj[adj != 0] / std_adj adj[adj", "0, -1) # batch the data # after [nbatch, nseg,", "seg ids for testing data [n_yrs, n_seg, len_seq, 2] \"\"\"", "isinstance(start_dates, tuple): if len(start_dates) == len(end_dates): data_list = [] for", "if the dataset is empty, just return it as is", "reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y & np.isfinite(period):", "values check_if_finite(x_data) x_trn, x_val, x_tst = separate_trn_tst( x_data, train_start_date, train_end_date,", "data for the test period [n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn:", "a standard deviation of 1 and a mean of zero", "output data for DL model training read in and process", "array] batched data with dims [nbatches, nseg, seq_len (batch_size), nfeat]", "training data and get the mean and std y_obs_trn, y_std,", "D ** -1.0 D_inv = np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat)", "from __future__ import absolute_import from __future__ import division from __future__", "start and end dates for the exclude group :param exclude_grp:", "with original dims \"\"\" if not isinstance(std, xr.Dataset) or not", "= argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output 
directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\",", "testing. :param dataset: [xr dataset] input or output data with", "same length\") else: raise ValueError(\"start_dates must be either str, list,", "filenames of observation files :param pre_train_file: [str] the file of", "data for the training period 'y_trn_obs_std': standard deviation of the", "\"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'],", "matrix. **The resulting matrix is sorted by seg_id_nat ** :param", "select the data from a date range or a set", "'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',", "last batch since they will be smaller combined.extend([s for s", "return scaled, std, mean def sel_partition_data(dataset, start_dates, end_dates): \"\"\" select", "to match theirs. df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y", "str): if isinstance(end_dates, str): return dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates", "to end period (can have multiple discontinuos periods) :return: dataset", "= parser.parse_args() if os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists. 
Do you", "if it's a list of date ranges elif isinstance(start_dates, list)", "scale(x_data) x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _,", "0] / std_adj adj[adj != 0] = 1 / (1", "from __future__ import print_function from __future__ import unicode_literals import argparse", "for i in range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return", "seq_len=seq_len, offset=offset) # reshape data # after [nseq, seq_len, nseg,", "y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date, val_end_date,", "xr.Dataset): std = dataset.std(skipna=True) mean = dataset.mean(skipna=True) # adding small", "data # after [nseq, seq_len, nseg, nfeat] #reshaped = reshape_for_training(batched)", "training :param data: training data (either x or y or", "std of zero scaled = (dataset - mean) / (std", "def reshape_for_training(data): \"\"\" reshape the data for training :param data:", "mean) / (std + 1e-10) check_if_finite(std) check_if_finite(mean) return scaled, std,", "set of date ranges :param dataset: [xr dataset] input or", "the data for training :param data: training data (either x", "training and testing data along with the means and standard", "adding small number in case there is a std of", "!= 'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our inputs", "output data :param std: [xr dataset] standard deviation if scaling", "ndates]; after [nseg, ndates, nfeat] # this is the order", "data :return: [xr dataset] the observations in the same time", "normalize_y=False, ): \"\"\" prepare input and output data for DL", "[n_yrs, n_seg, len_seq, 2] \"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data =", "\"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length,", "import pickle def 
scale(dataset, std=None, mean=None): \"\"\" scale the data", "or list] fmt: \"YYYY-MM-DD\"; date(s) to start validation period (can", "dictionary representing the exclude group from the exclude yml file", "default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\",", "LAtest['y'].shape check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train'])", "\"\"\" start = exclude_grp.get(\"start_date\") if start: start = datetime.datetime.strptime(start, \"%Y-%m-%d\")", "adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj = np.where(np.isinf(adj),", "str, list, or tuple\") def separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date,", "data :param std: [xr dataset] standard deviation if scaling test", "the values will be written :returns: training and testing data", "or 'flow'. This determines the order of the variables. 
:param", "# if it just one date range if isinstance(start_dates, str):", "of record of SNTemp [n_samples, seq_len, n_out] 'y_obs_trn': batched, scaled,", "= adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj", "!= 0])) I = np.eye(adj.shape[0]) A_hat = adj.copy() + I", "= [] for i in range(int(1 / offset)): start =", "assumes your training data is in one continuous block and", "seq_len=365, offset=1, y = False, period = np.nan): \"\"\" convert", "== len(end_dates): data_list = [] for i in range(len(start_dates)): date_slice", "\"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length),", "batched = split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape data # after", "[str] path to the distance matrix .npz file :param train_start_date:", "return data def prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process adj matrix.", "the means and standard deviations of the training input and", "have any weird input values check_if_finite(x_data) x_trn, x_val, x_tst =", "that should be used as input. If None, all of", "return row_col_names, sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if", "exclude_grp: [dict] dictionary representing the exclude group from the exclude", "coord_name, seq_len=365, offset=1): # I need one variable name. 
It", "\"YYYY-MM-DD\"; date(s) to start test period (can have multiple discontinuos", "offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn,", "x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def prep_adj_matrix(infile,", "periods) :param test_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "period [n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn: batched dates and national", "def scale(dataset, std=None, mean=None): \"\"\" scale the data so it", "testing data are scaled to have a std of 1", "adj_matrices = np.load(infile) adj = adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"])", "= sel_partition_data(dataset, val_start_date, val_end_date) test = sel_partition_data(dataset, test_start_date, test_end_date) return", "== \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30',", "exclude group :param exclude_grp: [dict] dictionary representing the exclude group", "mean=None): \"\"\" scale the data so it has a standard", "combined = np.asarray(combined) return combined def read_multiple_obs(obs_files, x_data): \"\"\" read", "return it as is if dataset.date.size == 0: return arr", "sequences (i.e., 365) :param offset: [float] 0-1, how to offset", "pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30',", "= 12 seq_length_y = 12 y_start = 1 LAtrain =", 
":param dataset: [xr dataset] input or output data with date", "12 y_start = 1 LAtrain = np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz')", "offset=offset) # reshape data # after [nseq, seq_len, nseg, nfeat]", "primary_variable: [str] which variable the model should focus on 'temp'", "mean_adj adj[adj != 0] = adj[adj != 0] / std_adj", ":param train_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end", "val_start_date, val_end_date, test_start_date, test_end_date, ) x_scl, x_std, x_mean = scale(x_data)", ":param train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start", "[numpy array] processed adjacency matrix \"\"\" adj_matrices = np.load(infile) adj", "as np import os import pandas as pd import util", "get_exclude_start_end(exclude_grp): \"\"\" get the start and end dates for the", "datetime import pickle def scale(dataset, std=None, mean=None): \"\"\" scale the", "discontinuos periods) :param val_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", ") np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_pre_val'], )", "np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], ) np.savez_compressed(os.path.join(out_file,'val.npz'),", "train period (can have multiple discontinuos periods) :param train_end_date: [str", "seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length,", "df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\":", "model should focus on 'temp' or 'flow'. 
This determines the", "# first first_var = next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name", "fmt: \"YYYY-MM-DD\"; date(s) to end period (can have multiple discontinuos", "adj[adj != 0] / std_adj adj[adj != 0] = 1", "data with dims [nseg, ndates, nfeat] :param seq_len: [int] length", "indices_or_sections=idx, axis=1) # add all but the first and last", "\"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if out_file: if os.path.isdir(out_file) ==", "the observations in the same time \"\"\" obs = [x_data.sortby([\"seg_id_nat\",", "type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",)", "read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs = y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn,", "from a date range or a set of date ranges", "len(start_dates) == len(end_dates): data_list = [] for i in range(len(start_dates)):", "np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) '''", "dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates is str but not end_date\")", "\"YYYY-MM-DD\"; date(s) to end test period (can have multiple discontinuos", "scaling test data with dims :param mean: [xr dataset] mean", "ds[\"site_id\"] obs = xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]] obs", "mean=x_mean) x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean) x_tst_scl, _,", "end test period (can have multiple discontinuos periods) \"\"\" train", "start validation period (can have multiple discontinuos periods) :param val_end_date:", "array] processed adjacency matrix \"\"\" adj_matrices = np.load(infile) adj =", "from the test data according to the start and end", "\"downstream\" 
or \"updown\") :param out_file: :return: [numpy array] processed adjacency", "seq_len, n_feat = data.shape return np.reshape(data, [n_batch * n_seg, seq_len,", "default=1, help=\"Y pred start\", ) parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args()", "dataset] mean if scaling test data with dims :return: scaled", "\"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file = 'data/DRB_gwn_full')", "batched, scaled, and centered output observation data for the training", "return arr # before [nfeat, nseg, ndates]; after [nseg, ndates,", "df.sort_index(axis=1) sensor_id_to_ind = {} for i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id]", "[str] temperature observations file (csv) :param obs_flow_file:[str] discharge observations file", "standard deviation if scaling test data with dims :param mean:", "''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file,", "that are not in the training are in the testing.", "means and standard deviations of the training input and output", "matrix .npz file :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\";", "[xr dataset] standard deviation if scaling test data with dims", "n_out] 'y_obs_trn': batched, scaled, and centered output observation data for", "end period (can have multiple discontinuos periods) :return: dataset of", "y_mean) else: _, y_std, y_mean = scale(y_obs_trn) data = {", "dims \"\"\" if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):", ") y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date,", "train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) x_scl, x_std, x_mean", 
"convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\":", "import yaml import xarray as xr import datetime import pickle", "(y/n)')).lower().strip() if reply[0] != 'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args) #####", "exclude_file: [str] path to exclude file :param log_q: [bool] whether", "obs = xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]] obs =", "list] fmt: \"YYYY-MM-DD\"; date(s) to start validation period (can have", "batches with size of batch_size :param data_array: [numpy array] array", "std of 1 and a mean of zero :param obs_temper_file:", "[0,2,1,3]) if y & np.isfinite(period): reshaped = reshaped[:,-period:,...] return reshaped", "standard deviations of the training input and output data 'y_trn_pre':", "fmt: \"YYYY-MM-DD\"; date(s) to end train period (can have multiple", "overwrite it? (y/n)')).lower().strip() if reply[0] != 'y': exit else: os.makedirs(args.output_dir)", "distfile: [str] path to the distance matrix .npz file :param", "'dates_ids_trn: batched dates and national seg ids for training data", "coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\":", "scale(y_obs_trn) y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean) else: _,", "else: raise ValueError(\"start_dates and end_dates must have same length\") else:", "= 1 / (1 + np.exp(-adj[adj != 0])) I =", "import os.path import pandas as pd import numpy as np", "input and output data 'y_trn_pre': batched, scaled, and centered output", "the model should focus on 'temp' or 'flow'. 
This determines", "'data/DRB_gwn_full') '''f __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str,", "train = sel_partition_data(dataset, train_start_date, train_end_date) val = sel_partition_data(dataset, val_start_date, val_end_date)", "list] fmt: \"YYYY-MM-DD\"; date(s) to start period (can have multiple", "dataset] input or output data :param std: [xr dataset] standard", "start, end def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period", "(std + 1e-10) check_if_finite(std) check_if_finite(mean) return scaled, std, mean def", "'y_trn_pre': batched, scaled, and centered output data for entire period", "[str] path to exclude file :param log_q: [bool] whether or", "expects arr = np.moveaxis(arr, 0, -1) # batch the data", "batches (e.g., 0.5 means that the first batch will be", "the data # after [nbatch, nseg, seq_len, nfeat] batched =", ":param primary_variable: [str] which variable the model should focus on", "1 LAtrain = np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz')", "numpy as np import os import pandas as pd import", "nfeat] # this is the order that the split into", "you want to overwrite it? 
(y/n)')).lower().strip() if reply[0] != 'y':", "y_obs_val, y_obs_tst = separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date,", "observations in the same time \"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])]", "np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file, pretrain_file, #distfile, train_start_date, train_end_date, val_start_date,", "scaled to have a std of 1 and a mean", "train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\",", ":returns: training and testing data along with the means and", "of zero scaled = (dataset - mean) / (std +", "as is if dataset.date.size == 0: return arr # before", "into numpy array, swap the axes, batch the array and", "1, step=seq_len) split = np.split(data_array, indices_or_sections=idx, axis=1) # add all", "test_start_date, test_end_date, ) if normalize_y: # scale y training data", "= (dataset - mean) / (std + 1e-10) check_if_finite(std) check_if_finite(mean)", "the order of the variables. 
:param catch_prop_file: [str] the path", "datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end def convert_batch_reshape(dataset, seq_len=365, offset=1, y", ".npz file :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "with dims :return: scaled data with original dims \"\"\" if", "[str] file to where the values will be written :returns:", "period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset,", "check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01',", "input or output data with dims :param train_start_date: [str or", "y_pre_val, y_pre_tst = separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date,", "deviation if scaling test data with dims :param mean: [xr", "file :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "the variables will be used :param primary_variable: [str] which variable", "period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if out_file: if os.path.isdir(out_file)", "= pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y = 12 y_start =", "have multiple discontinuos periods) :return: dataset of just those dates", "separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) x_scl,", "to the catchment properties file. 
If left unfilled, the catchment", "offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\",", "data 'y_trn_pre': batched, scaled, and centered output data for entire", "coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return reshaped_np_arr", "std_adj adj[adj != 0] = 1 / (1 + np.exp(-adj[adj", "coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\":", "x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length),", ":param data: training data (either x or y or mask)", "seg ids for training data [n_samples, seq_len, 2] 'dates_ids_tst: un-batched", "array] array of training data with dims [nseg, ndates, nfeat]", "adj_full[2] adj = np.where(np.isinf(adj), 0, adj) adj = -adj mean_adj", "seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\":", "y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars),", "adj_full[1], A_hat] with open(out_file+'.pkl', 'wb') as f: pickle.dump(out_dm, f, protocol=2)", "xr.dataset to numpy array dataset = dataset.transpose(\"seg_id_nat\", \"date\") arr =", "periods) :param x_vars: [list] variables that should be used as", "return start, end def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False,", "scale(y_pre_trn, y_std, y_mean) else: _, y_std, y_mean = scale(y_obs_trn) data", "must have same length\") else: raise ValueError(\"start_dates must be either", "= 
np.split(data_array, indices_or_sections=idx, axis=1) # add all but the first", "): \"\"\" separate the train data from the test data", "dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return reshaped_np_arr def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all()", ":param exclude_file: [str] path to exclude file :param log_q: [bool]", "offset * seq_len) idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)", "y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if out_file: if", "periods) :param test_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "test data with dims :return: scaled data with original dims", "list] fmt: \"YYYY-MM-DD\"; date(s) to start train period (can have", "centered output observation data for the training period 'y_trn_obs_std': standard", "ds_pre[x_vars] # make sure we don't have any weird input", "start test period (can have multiple discontinuos periods) :param test_end_date:", "combined.extend([s for s in split if s.shape[1] == seq_len]) combined", "indexing. 
:param obs_files: [list] list of filenames of observation files", "LAtrain = np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape", "whether or not to take the log of discharge in", "nseg, nfeat] #reshaped = reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])", "[str] which variable the model should focus on 'temp' or", "the test data according to the start and end dates.", "return reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): # I need", "return xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates and end_dates must have", "the dates that are not in the training are in", "np.moveaxis(arr, 0, -1) # batch the data # after [nbatch,", "std=None, mean=None): \"\"\" scale the data so it has a", "'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our inputs to", "# add all but the first and last batch since", "for training :param dataset: [xr dataset] data to be batched", "offset=1): # I need one variable name. 
It can be", "national seg ids for testing data [n_yrs, n_seg, len_seq, 2]", "[xr dataset] input or output data with date dimension :param", "periods) :param end_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\":", "pretrain_file, #distfile, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None, y_vars=", ":param test_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start", "of distance matrix (\"upstream\", \"downstream\" or \"updown\") :param out_file: :return:", "but I'll use the # first first_var = next(iter(dataset.data_vars.keys())) coord_array", "(i.e., 365) :param offset: [float] 0-1, how to offset the", "or list] fmt: \"YYYY-MM-DD\"; date(s) to end train period (can", "y_obs_tst = separate_trn_tst( y_obs, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date,", "/ offset)): start = int(i * offset * seq_len) idx", "out_file: [str] file to where the values will be written", "y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean) else: _, y_std,", "!= 0] / std_adj adj[adj != 0] = 1 /", "end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end def convert_batch_reshape(dataset,", "np.nan): \"\"\" convert xarray dataset into numpy array, swap the", "dates \"\"\" # if it just one date range if", "val_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to start validation", "are scaled to have a std of 1 and a", "D = np.sum(A_hat, axis=1) D_inv = D ** -1.0 D_inv", "help=\"Y pred start\", ) parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args() if", "182-547) :return: [numpy array] batched and reshaped dataset \"\"\" #", "variables that should be used as input. 
If None, all", "= reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y &", "nfeat] #reshaped = reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if", "adj[adj != 0] = 1 / (1 + np.exp(-adj[adj !=", "sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return row_col_names, sensor_id_to_ind, df", "for the training period 'y_trn_obs_std': standard deviation of the y", "type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\",", "\"discharge_cms\"]] obs = obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return", "ds = xr.open_zarr(filename) obs.append(ds) if \"site_id\" in ds.variables: del ds[\"site_id\"]", "coord_name + \"1\" dataset[new_var_name] = coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]],", "\"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length),", "fmt: \"YYYY-MM-DD\"; date(s) to start train period (can have multiple", "raise ValueError(\"start_dates is str but not end_date\") # if it's", "[n_batch * n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get the", "def get_exclude_start_end(exclude_grp): \"\"\" get the start and end dates for", "the test period [n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn: batched dates", "y_obs, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) y_pre_trn, y_pre_val,", "list] fmt: \"YYYY-MM-DD\"; date(s) to end period (can have multiple", "mean=x_mean) x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean) y_obs =", "parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args() if os.path.exists(args.output_dir): reply = 
str(input(f'{args.output_dir}", "y_pre_tst = separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date,", "and centered output observation data for the training period 'y_trn_obs_std':", "__future__ import unicode_literals import argparse import numpy as np import", "testing data [n_yrs, n_seg, len_seq, 2] \"\"\" ds_pre = xr.open_zarr(pretrain_file)", "parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic", "[nfeat, nseg, ndates]; after [nseg, ndates, nfeat] # this is", "batched :param seq_len: [int] length of sequences (i.e., 365) :param", "the y observations training data [n_out] 'y_trn_obs_mean': mean of the", "[str] type of distance matrix (\"upstream\", \"downstream\" or \"updown\") :param", "[numpy array] batched and reshaped dataset \"\"\" # convert xr.dataset", "size of batch_size :param data_array: [numpy array] array of training", "[int] length of sequences (i.e., 365) :param offset: [float] 0-1,", "of filenames of observation files :param pre_train_file: [str] the file", "protocol=2) return adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names): \"\"\" sort", "data into batches with size of batch_size :param data_array: [numpy", "start period (can have multiple discontinuos periods) :param end_dates: [str", "data: training data (either x or y or mask) dims:", "yml file :return: [tuple of datetime objects] start date, end", "I'll use the # first first_var = next(iter(dataset.data_vars.keys())) coord_array =", "seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if out_file:", "train_start_date, train_end_date) val = sel_partition_data(dataset, val_start_date, val_end_date) test = sel_partition_data(dataset,", "and reshape for training :param dataset: [xr 
dataset] data to", "'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst,", "y_std.to_array().values, \"y_mean\": y_mean.to_array().values, } if out_file: if os.path.isdir(out_file) == False:", "either str, list, or tuple\") def separate_trn_tst( dataset, train_start_date, train_end_date,", "date(s) to start test period (can have multiple discontinuos periods)", "training :param out_file: [str] file to where the values will", "n_seg, len_seq, n_out] 'dates_ids_trn: batched dates and national seg ids", "y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs = y_obs[y_vars] y_pre =", "convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\":", "= np.load(infile) adj = adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj", "we don't have any weird input values check_if_finite(x_data) x_trn, x_val,", "import numpy as np import yaml import xarray as xr", "should be used as input. 
If None, all of the", "of SNTemp [n_samples, seq_len, n_out] 'y_obs_trn': batched, scaled, and centered", "help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int,", "file to where the values will be written :returns: training", "* offset * seq_len) idx = np.arange(start=start, stop=data_array.shape[1] + 1,", "have a std of 1 and a mean of zero", "= prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'], train_end_date=['2006-09-30', '2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30',", "\"\"\" process adj matrix. **The resulting matrix is sorted by", "obs_files: [list] list of filenames of observation files :param pre_train_file:", "get the start and end dates for the exclude group", "xr import datetime import pickle def scale(dataset, std=None, mean=None): \"\"\"", "axis=1) D_inv = D ** -1.0 D_inv = np.diag(D_inv) A_hat", "arr # before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]", "same time \"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in", "data with dims :param train_start_date: [str or list] fmt: \"YYYY-MM-DD\";", "of pre_training data :return: [xr dataset] the observations in the", "import pandas as pd import numpy as np import yaml", "from __future__ import unicode_literals import argparse import numpy as np", "sel_partition_data(dataset, train_start_date, train_end_date) val = sel_partition_data(dataset, val_start_date, val_end_date) test =", "f: pickle.dump(out_dm, f, protocol=2) return adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat,", "\"\"\" select the data from a date range or a", "have multiple discontinuos periods) :param test_end_date: [str or list] fmt:", "\"date\"])] for filename in 
obs_files: ds = xr.open_zarr(filename) obs.append(ds) if", "or list] fmt: \"YYYY-MM-DD\"; date(s) to start test period (can", "{} for i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return", "= 'data/DRB_gwn_full') '''f __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\",", "'temp' or 'flow'. This determines the order of the variables.", "val_start_date, val_end_date, test_start_date, test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(", "yaml import xarray as xr import datetime import pickle def", "list of date ranges elif isinstance(start_dates, list) or isinstance(start_dates, tuple):", "make sure we don't have any weird input values check_if_finite(x_data)", "the training are in the testing. :param dataset: [xr dataset]", "x_data) y_obs = y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst", "import print_function from __future__ import unicode_literals import argparse import numpy", ":return: reshaped data [nbatch * nseg, len_seq, nfeat/nout] \"\"\" n_batch,", "in the dataset, but I'll use the # first first_var", "in the same time \"\"\" obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for", "sure we don't have any weird input values check_if_finite(x_data) x_trn,", "all the dates that are not in the training are", "y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_pre_val'],", "batch the array and reshape for training :param dataset: [xr", "mean of the observation training data [n_out] 'y_obs_tst': un-batched, unscaled,", "_ = scale(x_tst, std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)", "#reshaped = reshape_for_training(batched) reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y", "get the mean and std y_obs_trn, y_std, y_mean = scale(y_obs_trn)", "for 
the exclude group :param exclude_grp: [dict] dictionary representing the", "properties file. If left unfilled, the catchment properties will not", "are not in the training are in the testing. :param", "adjacency matrix \"\"\" adj_matrices = np.load(infile) adj = adj_matrices[dist_type] adj_full", "= i return row_col_names, sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream',", "adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names): \"\"\" sort the distance matrix", "dataset.mean(skipna=True) # adding small number in case there is a", "observation data for the test period [n_yrs, n_seg, len_seq, n_out]", "training read in and process data into training and testing", "\"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return obs def reshape_for_training(data): \"\"\" reshape", "offset=offset, seq_len=seq_length, y=True, period=period), \"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),", "exit else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our inputs to match", "= [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in obs_files: ds = xr.open_zarr(filename)", "= adj[adj != 0] / std_adj adj[adj != 0] =", "discontinuos periods) :return: dataset of just those dates \"\"\" #", "have same length\") else: raise ValueError(\"start_dates must be either str,", "+ np.exp(-adj[adj != 0])) I = np.eye(adj.shape[0]) A_hat = adj.copy()", "return adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names): \"\"\" sort the", "sensor_id_to_ind = {} for i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] =", "split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split training data into batches with", "'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\": y_mean.to_array().values,", 
"period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\": y_std.to_array().values, \"y_mean\":", "[str or list] fmt: \"YYYY-MM-DD\"; date(s) to start validation period", "[xr dataset] input or output data with dims :param train_start_date:", "array and reshape for training :param dataset: [xr dataset] data", "train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) if normalize_y: #", "seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_std\":", "= 365, offset = 1, period = None, primary_variable=\"temp\", #catch_prop_file=None,", "\"\"\" separate the train data from the test data according", "LAtest = np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape", "pandas as pd import util import os.path import pandas as", "batched data with dims [nbatches, nseg, seq_len (batch_size), nfeat] \"\"\"", "sel_partition_data(dataset, start_dates, end_dates): \"\"\" select the data from a date", "Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y", "!= 0] = adj[adj != 0] - mean_adj adj[adj !=", "determines the order of the variables. 
:param catch_prop_file: [str] the", "out_file: :return: [numpy array] processed adjacency matrix \"\"\" adj_matrices =", "to the distance matrix .npz file :param train_start_date: [str or", "period = np.nan): \"\"\" convert xarray dataset into numpy array,", "2] \"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] # make", "dates for the exclude group :param exclude_grp: [dict] dictionary representing", "mean if scaling test data with dims :return: scaled data", "if end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end def", "[nseg, ndates, nfeat] :param seq_len: [int] length of sequences (i.e.,", "__name__ == \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01', '2016-10-01'],", ":param obs_temper_file: [str] temperature observations file (csv) :param obs_flow_file:[str] discharge", "path to the catchment properties file. 
If left unfilled, the", "just one date range if isinstance(start_dates, str): if isinstance(end_dates, str):", "split = np.split(data_array, indices_or_sections=idx, axis=1) # add all but the", "A_hat = adj.copy() + I D = np.sum(A_hat, axis=1) D_inv", "if normalize_y: # scale y training data and get the", "dataset.date.size == 0: return arr # before [nfeat, nseg, ndates];", "data for entire period of record of SNTemp [n_samples, seq_len,", "'y_obs_tst': un-batched, unscaled, uncentered observation data for the test period", "**The resulting matrix is sorted by seg_id_nat ** :param infile:", "distance matrix .npz file :param train_start_date: [str or list] fmt:", "'y_obs_trn': batched, scaled, and centered output observation data for the", "[nbatch, nseg, len_seq, nfeat/nout] :return: reshaped data [nbatch * nseg,", "test_start_date, test_end_date, x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length = 365, offset", "have multiple discontinuos periods) :param x_vars: [list] variables that should", "offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\",", "coord_as_reshaped_array(x_trn, \"date\", offset=offset, seq_len=seq_length), \"ids_val\": coord_as_reshaped_array(x_val, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\":", "if os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists. 
Do you want to", "ndates, nfeat] :param seq_len: [int] length of sequences (i.e., 365)", "or output data with date dimension :param start_dates: [str or", "val = sel_partition_data(dataset, val_start_date, val_end_date) test = sel_partition_data(dataset, test_start_date, test_end_date)", "obs_flow_file:[str] discharge observations file (csv) :param pretrain_file: [str] the file", "train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst =", "fmt: \"YYYY-MM-DD\"; date(s) to start validation period (can have multiple", "pretrain data to make sure we have the same indexing.", "axis=1) # add all but the first and last batch", "from __future__ import division from __future__ import print_function from __future__", "[list] variables that should be used as input. If None,", "(1 + np.exp(-adj[adj != 0])) I = np.eye(adj.shape[0]) A_hat =", "the array and reshape for training :param dataset: [xr dataset]", "split_into_batches(arr, seq_len=seq_len, offset=offset) # reshape data # after [nseq, seq_len,", "os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat our inputs to match theirs. df", "n_out] 'dates_ids_trn: batched dates and national seg ids for training", "val_end_date, test_start_date, test_end_date, ) x_scl, x_std, x_mean = scale(x_data) x_trn_scl,", "_ = scale(x_trn, std=x_std, mean=x_mean) x_val_scl, _, _ = scale(x_val,", "training and testing datasets. 
the training and testing data are", "if reply[0] != 'y': exit else: os.makedirs(args.output_dir) generate_train_val_test(args) ##### Reformat", "train_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end train", "batched and reshaped dataset \"\"\" # convert xr.dataset to numpy", "traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12,", ":param catch_prop_file: [str] the path to the catchment properties file.", "\"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\": x_std.to_array().values,", "x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'],", "have multiple discontinuos periods) \"\"\" train = sel_partition_data(dataset, train_start_date, train_end_date)", "# reshape data # after [nseq, seq_len, nseg, nfeat] #reshaped", "will be 182-547) :return: [numpy array] batched data with dims", "for i, sensor_id in enumerate(df.columns): sensor_id_to_ind[sensor_id] = i return row_col_names,", "D_inv = D ** -1.0 D_inv = np.diag(D_inv) A_hat =", "readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence", "import xarray as xr import datetime import pickle def scale(dataset,", "and end dates. 
This assumes your training data is in", "= dataset.to_array().values # if the dataset is empty, just return", "+ \"1\" dataset[new_var_name] = coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len,", "convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset,", "y_std, y_mean) else: _, y_std, y_mean = scale(y_obs_trn) data =", "std=x_std, mean=x_mean) x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean) y_obs", "= np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'],", "= adj_full[2] adj = np.where(np.isinf(adj), 0, adj) adj = -adj", "= scale(x_tst, std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs", "\"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst,", "in split if s.shape[1] == seq_len]) combined = np.asarray(combined) return", "df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind = {} for i, sensor_id", "seq_len=seq_length), \"x_val\": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length), \"x_test\": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length), \"x_std\":", "(SNTemp data) :param distfile: [str] path to the distance matrix", "isinstance(start_dates, str): if isinstance(end_dates, str): return dataset.sel(date=slice(start_dates, end_dates)) else: raise", "have the same indexing. 
:param obs_files: [list] list of filenames", "\"\"\" get the start and end dates for the exclude", "the exclude group :param exclude_grp: [dict] dictionary representing the exclude", "a mean of zero :param obs_temper_file: [str] temperature observations file", "or not isinstance(mean, xr.Dataset): std = dataset.std(skipna=True) mean = dataset.mean(skipna=True)", "import pandas as pd import util import os.path import pandas", "np.eye(adj.shape[0]) A_hat = adj.copy() + I D = np.sum(A_hat, axis=1)", "[nseg, ndates, nfeat] # this is the order that the", "input or output data :param std: [xr dataset] standard deviation", "reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y & np.isfinite(period): reshaped", "np.matmul(D_inv, A_hat) if out_file: out_dm = [adj_full[0], adj_full[1], A_hat] with", "def sel_partition_data(dataset, start_dates, end_dates): \"\"\" select the data from a", "= obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"}", "* nseg, len_seq, nfeat/nout] \"\"\" n_batch, n_seg, seq_len, n_feat =", "used :param primary_variable: [str] which variable the model should focus", "If left unfilled, the catchment properties will not be included", "out_file = 'data/DRB_gwn_full') '''f __name__ == \"__main__\": parser = argparse.ArgumentParser()", "def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period = np.nan):", "in the training are in the testing. :param dataset: [xr", "std = dataset.std(skipna=True) mean = dataset.mean(skipna=True) # adding small number", "x_vars: [list] variables that should be used as input. 
If", "have multiple discontinuos periods) :param val_end_date: [str or list] fmt:", ") x_scl, x_std, x_mean = scale(x_data) x_trn_scl, _, _ =", "type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred start\",", "in training :param out_file: [str] file to where the values", "2] 'dates_ids_tst: un-batched dates and national seg ids for testing", "\"\"\" read and format multiple observation files. we read in", "import os import pandas as pd import util import os.path", "np.moveaxis(batched, [0,1,2,3], [0,2,1,3]) if y & np.isfinite(period): reshaped = reshaped[:,-period:,...]", "for filename in obs_files: ds = xr.open_zarr(filename) obs.append(ds) if \"site_id\"", "not to take the log of discharge in training :param", "np import yaml import xarray as xr import datetime import", "= coord_name + \"1\" dataset[new_var_name] = coord_array reshaped_np_arr = convert_batch_reshape(", "obs def reshape_for_training(data): \"\"\" reshape the data for training :param", "out_dm = [adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl', 'wb') as f:", "[xr dataset] mean if scaling test data with dims :return:", "scaled data with original dims \"\"\" if not isinstance(std, xr.Dataset)", "the order that the split into batches expects arr =", "0: return arr # before [nfeat, nseg, ndates]; after [nseg,", "test def split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split training data into", ":param offset: [float] 0-1, how to offset the batches (e.g.,", "#if __name__ == \"__main__\": check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full', obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full', pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output', train_start_date=['1985-10-01',", "the start and end dates. 
This assumes your training data", "convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val':", "prep_adj_matrix(infile, dist_type, out_file=None): \"\"\" process adj matrix. **The resulting matrix", "dates and national seg ids for training data [n_samples, seq_len,", "\"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp', seq_length=365, period=np.nan,", "if dataset.date.size == 0: return arr # before [nfeat, nseg,", "don't have any weird input values check_if_finite(x_data) x_trn, x_val, x_tst", "data from a date range or a set of date", "import datetime import pickle def scale(dataset, std=None, mean=None): \"\"\" scale", "for training :param data: training data (either x or y", "adj = adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2]", "seq_length=365, period=np.nan, offset=1, out_file = 'data/DRB_gwn_full') '''f __name__ == \"__main__\":", "\"discharge_cms\": \"seg_outflow\"} ) return obs def reshape_for_training(data): \"\"\" reshape the", "range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else:", "convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_val\": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True,", "according to the start and end dates. 
This assumes your", "end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return start, end def convert_batch_reshape(dataset, seq_len=365,", "not be included as predictors :param exclude_file: [str] path to", "dataset] the observations in the same time \"\"\" obs =", "dimension :param start_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "have multiple discontinuos periods) :param end_dates: [str or list] fmt:", "there is a std of zero scaled = (dataset -", "they will be smaller combined.extend([s for s in split if", "mean def sel_partition_data(dataset, start_dates, end_dates): \"\"\" select the data from", "files. we read in the pretrain data to make sure", "separate_trn_tst( dataset, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ): \"\"\"", "with the pretraining data (SNTemp data) :param distfile: [str] path", "nfeat] \"\"\" combined = [] for i in range(int(1 /", "ids for testing data [n_yrs, n_seg, len_seq, 2] \"\"\" ds_pre", "n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get the start and end dates", "y=True, period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length,", "seg_id_nat :return: \"\"\" df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df =", "or a set of date ranges :param dataset: [xr dataset]", "default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int,", "for DL model training read in and process data into", "tuple): if len(start_dates) == len(end_dates): data_list = [] for i", "'flow'. This determines the order of the variables. 
:param catch_prop_file:", "y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data def prep_adj_matrix(infile, dist_type,", "seq_len=365, offset=1): \"\"\" split training data into batches with size", "action='store_true',) args = parser.parse_args() if os.path.exists(args.output_dir): reply = str(input(f'{args.output_dir} exists.", "scale y training data and get the mean and std", "date(s) to end validation period (can have multiple discontinuos periods)", "None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False, ): \"\"\"", "test_end_date, ) if normalize_y: # scale y training data and", "obs = [x_data.sortby([\"seg_id_nat\", \"date\"])] for filename in obs_files: ds =", "training data [n_out] 'y_obs_tst': un-batched, unscaled, uncentered observation data for", "- mean) / (std + 1e-10) check_if_finite(std) check_if_finite(mean) return scaled,", "unscaled, uncentered observation data for the test period [n_yrs, n_seg,", ":param seq_len: [int] length of sequences (i.e., 365) :param offset:", "str): return dataset.sel(date=slice(start_dates, end_dates)) else: raise ValueError(\"start_dates is str but", "reshape for training :param dataset: [xr dataset] data to be", "np.std(adj[adj != 0]) adj[adj != 0] = adj[adj != 0]", "to start validation period (can have multiple discontinuos periods) :param", "is in one continuous block and all the dates that", "(can have multiple discontinuos periods) :param val_start_date: [str or list]", "1e-10) check_if_finite(std) check_if_finite(mean) return scaled, std, mean def sel_partition_data(dataset, start_dates,", "periods) :param val_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "list] fmt: \"YYYY-MM-DD\"; date(s) to end validation period (can have", "# make sure we don't have any weird input values", "x_data): \"\"\" read and format multiple observation files. 
we read", "y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length = 365, offset = 1, period", "def check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file, pretrain_file, #distfile,", "mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data) y_obs = y_obs[y_vars] y_pre", "obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} )", "mask) dims: [nbatch, nseg, len_seq, nfeat/nout] :return: reshaped data [nbatch", "the exclude yml file :return: [tuple of datetime objects] start", "[nseq, seq_len, nseg, nfeat] #reshaped = reshape_for_training(batched) reshaped = np.moveaxis(batched,", "batched dates and national seg ids for training data [n_samples,", "period (can have multiple discontinuos periods) :param test_start_date: [str or", "in the testing. :param dataset: [xr dataset] input or output", "# adding small number in case there is a std", "on 'temp' or 'flow'. 
This determines the order of the", "period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True,", "reshape data # after [nseq, seq_len, nseg, nfeat] #reshaped =", "training :param dataset: [xr dataset] data to be batched :param", "y_std, y_mean = scale(y_obs_trn) y_pre_trn, _, _ = scale(y_pre_trn, y_std,", "'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'],", "data for training :param data: training data (either x or", "{\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return obs def reshape_for_training(data): \"\"\"", "= np.matmul(D_inv, A_hat) if out_file: out_dm = [adj_full[0], adj_full[1], A_hat]", "parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred", "a list of date ranges elif isinstance(start_dates, list) or isinstance(start_dates,", "std y_obs_trn, y_std, y_mean = scale(y_obs_trn) y_pre_trn, _, _ =", "\"\"\" ds_pre = xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] # make sure", "catchment properties file. 
If left unfilled, the catchment properties will", "the file of pre_training data :return: [xr dataset] the observations", "if out_file: out_dm = [adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl', 'wb')", "input and output data for DL model training read in", "f, protocol=2) return adj_full[0], adj_full[1], A_hat def sort_dist_matrix(mat, row_col_names): \"\"\"", "start\", ) parser.add_argument(\"--dow\", action='store_true',) args = parser.parse_args() if os.path.exists(args.output_dir): reply", "validation period (can have multiple discontinuos periods) :param test_start_date: [str", "be batched :param seq_len: [int] length of sequences (i.e., 365)", "ds_pre = xr.open_zarr(pretrain_file) x_data = ds_pre[x_vars] # make sure we", "obs_temper_file, obs_flow_file, pretrain_file, #distfile, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date,", "discontinuos periods) :param test_start_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", ":param infile: :param dist_type: [str] type of distance matrix (\"upstream\",", "batch will be 0-365 and the second will be 182-547)", ":param test_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to end", "\"seg_outflow\"], seq_length = 365, offset = 1, period = None,", "None, all of the variables will be used :param primary_variable:", "# after [nseq, seq_len, nseg, nfeat] #reshaped = reshape_for_training(batched) reshaped", ":param distfile: [str] path to the distance matrix .npz file", "primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file = 'data/DRB_gwn_full') '''f __name__ ==", ":param obs_files: [list] list of filenames of observation files :param", "filename in obs_files: ds = xr.open_zarr(filename) obs.append(ds) if \"site_id\" in", "or isinstance(start_dates, tuple): if len(start_dates) == len(end_dates): data_list = []", "train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, x_vars=None, y_vars= 
[\"seg_tave_water\", \"seg_outflow\"],", "discontinuos periods) :param end_dates: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "is sorted by seg_id_nat ** :param infile: :param dist_type: [str]", "period (can have multiple discontinuos periods) :param train_end_date: [str or", "observations file (csv) :param obs_flow_file:[str] discharge observations file (csv) :param", "= data.shape return np.reshape(data, [n_batch * n_seg, seq_len, n_feat]) def", "** :param infile: :param dist_type: [str] type of distance matrix", "= reshaped[:,-period:,...] return reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1): #", "test_start_date, test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst( y_pre, train_start_date,", "combined = [] for i in range(int(1 / offset)): start", "= xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name + \"1\" dataset[new_var_name] =", "process data into training and testing datasets. the training and", "help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred start\", ) parser.add_argument(\"--dow\",", "# this is the order that the split into batches", "y=data['y_obs_tst'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return", "offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst, \"seg_id_nat\",", "type of distance matrix (\"upstream\", \"downstream\" or \"updown\") :param out_file:", "group :param exclude_grp: [dict] dictionary representing the exclude group from", "period (can have multiple discontinuos periods) :param test_end_date: [str or", "mean_adj = np.mean(adj[adj != 0]) std_adj = np.std(adj[adj != 0])", "be either str, list, or tuple\") def separate_trn_tst( dataset, train_start_date,", "[str or 
list] fmt: \"YYYY-MM-DD\"; date(s) to start test period", "seq_len) idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len) split =", "have multiple discontinuos periods) :param train_end_date: [str or list] fmt:", "and output data for DL model training read in and", "file (csv) :param pretrain_file: [str] the file with the pretraining", "365, offset = 1, period = None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None,", "** -1.0 D_inv = np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat) if", "to exclude file :param log_q: [bool] whether or not to", "adj = np.where(np.isinf(adj), 0, adj) adj = -adj mean_adj =", "the batches (e.g., 0.5 means that the first batch will", "offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val,", "be 182-547) :return: [numpy array] batched and reshaped dataset \"\"\"", "but not end_date\") # if it's a list of date", "period 'y_trn_obs_std': standard deviation of the y observations training data", "smaller combined.extend([s for s in split if s.shape[1] == seq_len])", "val_start_date, val_end_date) test = sel_partition_data(dataset, test_start_date, test_end_date) return train, val,", "deviation of 1 and a mean of zero :param dataset:", "prepare input and output data for DL model training read", "combined def read_multiple_obs(obs_files, x_data): \"\"\" read and format multiple observation", "x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_pre_test'], ) np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'],", "dataset.to_array().values # if the dataset is empty, just return it", "= np.std(adj[adj != 0]) adj[adj != 0] = adj[adj !=", "national seg ids for training data [n_samples, seq_len, 2] 'dates_ids_tst:", "reshaped = reshaped[:,-period:,...] 
return reshaped def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1):", "be used :param primary_variable: [str] which variable the model should", "if os.path.isdir(out_file) == False: os.makedirs(out_file) ''' np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train'])", "[n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn: batched dates and national seg", "infile: :param dist_type: [str] type of distance matrix (\"upstream\", \"downstream\"", "offset = 1, period = None, primary_variable=\"temp\", #catch_prop_file=None, #exclude_file=None, #log_q=False,", "n_batch, n_seg, seq_len, n_feat = data.shape return np.reshape(data, [n_batch *", "match theirs. df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y =", "dataset of just those dates \"\"\" # if it just", "x or y or mask) dims: [nbatch, nseg, len_seq, nfeat/nout]", "* seq_len) idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len) split", "out_file=None): \"\"\" process adj matrix. **The resulting matrix is sorted", "end_date\") # if it's a list of date ranges elif", "that the split into batches expects arr = np.moveaxis(arr, 0,", "-1.0 D_inv = np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat) if out_file:", "one variable name. 
It can be any in the dataset,", "= y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(", "np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_train\": coord_as_reshaped_array(x_trn, \"date\", offset=offset,", "scaled, std, mean def sel_partition_data(dataset, start_dates, end_dates): \"\"\" select the", "discontinuos periods) :param test_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s)", "[n_samples, seq_len, n_out] 'y_obs_trn': batched, scaled, and centered output observation", "\"seg_id_nat\", offset=offset, seq_len=seq_length), \"dates_val\": coord_as_reshaped_array(x_val, \"date\", offset=offset, seq_len=seq_length), \"ids_test\": coord_as_reshaped_array(x_tst,", "unicode_literals import argparse import numpy as np import os import", "length\") else: raise ValueError(\"start_dates must be either str, list, or", "y_obs = y_obs[y_vars] y_pre = ds_pre[y_vars] y_obs_trn, y_obs_val, y_obs_tst =", "= np.sum(A_hat, axis=1) D_inv = D ** -1.0 D_inv =", "date_slice = slice(start_dates[i], end_dates[i]) data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else: raise", "start_dates, end_dates): \"\"\" select the data from a date range", "np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len) split = np.split(data_array, indices_or_sections=idx, axis=1)", "training data (either x or y or mask) dims: [nbatch,", "data [nbatch * nseg, len_seq, nfeat/nout] \"\"\" n_batch, n_seg, seq_len,", "dims :param mean: [xr dataset] mean if scaling test data", "end validation period (can have multiple discontinuos periods) :param test_start_date:", "it just one date range if isinstance(start_dates, str): if isinstance(end_dates,", "end def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period =", "##### Reformat our inputs to match theirs. 
df = pd.read_hdf(\"data/metr-la.h5\")", "file with the pretraining data (SNTemp data) :param distfile: [str]", "those dates \"\"\" # if it just one date range", "nseg, len_seq, nfeat/nout] :return: reshaped data [nbatch * nseg, len_seq,", "\"\"\" convert xarray dataset into numpy array, swap the axes,", "dataset] data to be batched :param seq_len: [int] length of", "= [] for i in range(len(start_dates)): date_slice = slice(start_dates[i], end_dates[i])", "y observations training data [n_out] 'y_trn_obs_mean': mean of the observation", "end_dates must have same length\") else: raise ValueError(\"start_dates must be", "\"date\") arr = dataset.to_array().values # if the dataset is empty,", "ds.variables: del ds[\"site_id\"] obs = xr.merge(obs, join=\"left\") obs = obs[[\"temp_c\",", "distance matrix by seg_id_nat :return: \"\"\" df = pd.DataFrame(mat, columns=row_col_names,", "D_inv = np.diag(D_inv) A_hat = np.matmul(D_inv, A_hat) if out_file: out_dm", "dist_type: [str] type of distance matrix (\"upstream\", \"downstream\" or \"updown\")", "this is the order that the split into batches expects", "order that the split into batches expects arr = np.moveaxis(arr,", "np.savez_compressed(os.path.join(out_file, 'pre_train.npz'), x=data['x_train'], y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_pre_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'),", "the pretraining data (SNTemp data) :param distfile: [str] path to", "LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3]) np.savez_compressed(os.path.join(out_file,", "__future__ import division from __future__ import print_function from __future__ import", "in case there is a std of zero scaled =", "nseg, len_seq, nfeat/nout] \"\"\" n_batch, n_seg, seq_len, n_feat = data.shape", "split training data into batches with size of batch_size :param", 
"coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),", "of just those dates \"\"\" # if it just one", "raise ValueError(\"start_dates and end_dates must have same length\") else: raise", "(can have multiple discontinuos periods) :param val_end_date: [str or list]", "test period (can have multiple discontinuos periods) :param x_vars: [list]", "default=12, help=\"Sequence Length.\",) parser.add_argument(\"--y_start\", type=int, default=1, help=\"Y pred start\", )", "seq_len: [int] length of sequences (i.e., 365) :param offset: [float]", "_, _ = scale(x_tst, std=x_std, mean=x_mean) y_obs = read_multiple_obs([obs_temper_file, obs_flow_file],", "np.load('data/METR-LA/train.npz') LAtest = np.load('data/METR-LA/test.npz') LAval = np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape", "periods) :param train_end_date: [str or list] fmt: \"YYYY-MM-DD\"; date(s) to", "nfeat] :param seq_len: [int] length of sequences (i.e., 365) :param", "list] fmt: \"YYYY-MM-DD\"; date(s) to start test period (can have", "= separate_trn_tst( y_pre, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, )", "data.shape return np.reshape(data, [n_batch * n_seg, seq_len, n_feat]) def get_exclude_start_end(exclude_grp):", "to start train period (can have multiple discontinuos periods) :param", "= str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()", "1 and a mean of zero :param dataset: [xr dataset]", "have multiple discontinuos periods) :param val_start_date: [str or list] fmt:", "to take the log of discharge in training :param out_file:", "train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) y_pre_trn, y_pre_val, y_pre_tst", "dates. 
This assumes your training data is in one continuous", "out_file: out_dm = [adj_full[0], adj_full[1], A_hat] with open(out_file+'.pkl', 'wb') as", "= pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0) df = df.sort_index(axis=1)", "any weird input values check_if_finite(x_data) x_trn, x_val, x_tst = separate_trn_tst(", "datasets. the training and testing data are scaled to have", "multiple observation files. we read in the pretrain data to", "0] = 1 / (1 + np.exp(-adj[adj != 0])) I", "scale(dataset, std=None, mean=None): \"\"\" scale the data so it has", "y_pre, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date, ) if normalize_y:", "np.array([period]), 'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period), 'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset,", "convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period), \"y_train\": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True,", "seq_len=365, offset=1): # I need one variable name. 
It can", "columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0) df = df.sort_index(axis=1) sensor_id_to_ind =", "(csv) :param pretrain_file: [str] the file with the pretraining data", "\"\"\" combined = [] for i in range(int(1 / offset)):", "adj[adj != 0] = adj[adj != 0] - mean_adj adj[adj", "import argparse import numpy as np import os import pandas", "mean: [xr dataset] mean if scaling test data with dims", "__future__ import absolute_import from __future__ import division from __future__ import", "the second will be 182-547) :return: [numpy array] batched data", "\"y_mean\": y_mean.to_array().values, } if out_file: if os.path.isdir(out_file) == False: os.makedirs(out_file)", "batches expects arr = np.moveaxis(arr, 0, -1) # batch the", "[dict] dictionary representing the exclude group from the exclude yml", "test_end_date) return train, val, test def split_into_batches(data_array, seq_len=365, offset=1): \"\"\"", "that the first batch will be 0-365 and the second", "reshaped dataset \"\"\" # convert xr.dataset to numpy array dataset", ":return: \"\"\" df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names) df = df.sort_index(axis=0)", "obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return obs def reshape_for_training(data):", "test_end_date, x_vars=None, y_vars= [\"seg_tave_water\", \"seg_outflow\"], seq_length = 365, offset =", "int(i * offset * seq_len) idx = np.arange(start=start, stop=data_array.shape[1] +", "& np.isfinite(period): reshaped = reshaped[:,-period:,...] 
return reshaped def coord_as_reshaped_array(dataset, coord_name,", "after [nbatch, nseg, seq_len, nfeat] batched = split_into_batches(arr, seq_len=seq_len, offset=offset)", "standard deviation of 1 and a mean of zero :param", "def sort_dist_matrix(mat, row_col_names): \"\"\" sort the distance matrix by seg_id_nat", "seq_len, n_feat]) def get_exclude_start_end(exclude_grp): \"\"\" get the start and end", "log_q: [bool] whether or not to take the log of", "0])) I = np.eye(adj.shape[0]) A_hat = adj.copy() + I D", "parser = argparse.ArgumentParser() parser.add_argument(\"--output_dir\", type=str, default=\"data/METR-LA\", help=\"Output directory.\") parser.add_argument(\"--traffic_df_filename\", type=str,", "adj = adj_full[2] adj = np.where(np.isinf(adj), 0, adj) adj =", "s in split if s.shape[1] == seq_len]) combined = np.asarray(combined)", "nseg, seq_len (batch_size), nfeat] \"\"\" combined = [] for i", "standard deviation of the y observations training data [n_out] 'y_trn_obs_mean':", "#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__ == \"__main__\": check2", "nfeat/nout] \"\"\" n_batch, n_seg, seq_len, n_feat = data.shape return np.reshape(data,", "read and format multiple observation files. 
we read in the", "of the y observations training data [n_out] 'y_trn_obs_mean': mean of", "obs = obs.rename( {\"temp_c\": \"seg_tave_water\", \"discharge_cms\": \"seg_outflow\"} ) return obs", "# if it's a list of date ranges elif isinstance(start_dates,", "seq_len=seq_length), \"x_std\": x_std.to_array().values, \"x_mean\": x_mean.to_array().values, \"x_cols\": np.array(x_vars), \"ids_train\": coord_as_reshaped_array(x_trn, \"seg_id_nat\",", "pretraining data (SNTemp data) :param distfile: [str] path to the", "multiple discontinuos periods) :param test_start_date: [str or list] fmt: \"YYYY-MM-DD\";", "is empty, just return it as is if dataset.date.size ==", "\"YYYY-MM-DD\"; date(s) to end period (can have multiple discontinuos periods)", "just return it as is if dataset.date.size == 0: return", "a set of date ranges :param dataset: [xr dataset] input", "i in range(int(1 / offset)): start = int(i * offset", "= xr.open_zarr(filename) obs.append(ds) if \"site_id\" in ds.variables: del ds[\"site_id\"] obs", "(can have multiple discontinuos periods) :param x_vars: [list] variables that", "data [n_samples, seq_len, 2] 'dates_ids_tst: un-batched dates and national seg", "period (can have multiple discontinuos periods) :param end_dates: [str or", "# if the dataset is empty, just return it as", "variables will be used :param primary_variable: [str] which variable the", "the first and last batch since they will be smaller", "offset=offset, seq_len=seq_length), \"dates_test\": coord_as_reshaped_array(x_tst, \"date\", offset=offset, seq_len=seq_length), \"y_pre_train\": convert_batch_reshape(y_pre_trn, offset=offset,", "check_if_finite(xarr): assert np.isfinite(xarr.to_array().values).all() def prep_data( obs_temper_file, obs_flow_file, pretrain_file, #distfile, train_start_date,", ":param mean: [xr dataset] mean if scaling test data with", "= scale(y_pre_trn, y_std, y_mean) else: _, y_std, y_mean = scale(y_obs_trn)", "\"seg_elev\"], y_vars=['seg_tave_water'], 
primary_variable='temp', seq_length=365, period=np.nan, offset=1, out_file = 'data/DRB_gwn_full') '''f", "or y or mask) dims: [nbatch, nseg, len_seq, nfeat/nout] :return:", "data with original dims \"\"\" if not isinstance(std, xr.Dataset) or", "mean = dataset.mean(skipna=True) # adding small number in case there", "adj_matrices[dist_type] adj_full = sort_dist_matrix(adj, adj_matrices[\"rowcolnames\"]) adj = adj_full[2] adj =", "n_seg, seq_len, n_feat = data.shape return np.reshape(data, [n_batch * n_seg,", "array of training data with dims [nseg, ndates, nfeat] :param", "of 1 and a mean of zero :param dataset: [xr", "= np.nan): \"\"\" convert xarray dataset into numpy array, swap", "parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\", type=int, default=12, help=\"Sequence Length.\",)", "\"1\" dataset[new_var_name] = coord_array reshaped_np_arr = convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset", "input. 
If None, all of the variables will be used", "\"YYYY-MM-DD\"; date(s) to start train period (can have multiple discontinuos", "unfilled, the catchment properties will not be included as predictors", "[float] 0-1, how to offset the batches (e.g., 0.5 means", "= np.asarray(combined) return combined def read_multiple_obs(obs_files, x_data): \"\"\" read and", "or output data with dims :param train_start_date: [str or list]", "parser.add_argument(\"--traffic_df_filename\", type=str, default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence", "(can have multiple discontinuos periods) \"\"\" train = sel_partition_data(dataset, train_start_date,", "sort_dist_matrix(mat, row_col_names): \"\"\" sort the distance matrix by seg_id_nat :return:", "365) :param offset: [float] 0-1, how to offset the batches", "convert_batch_reshape( dataset[[new_var_name]], seq_len=seq_len, offset=offset ) return reshaped_np_arr def check_if_finite(xarr): assert", "as pd import numpy as np import yaml import xarray", "in ds.variables: del ds[\"site_id\"] obs = xr.merge(obs, join=\"left\") obs =", "test period [n_yrs, n_seg, len_seq, n_out] 'dates_ids_trn: batched dates and", "pd import util import os.path import pandas as pd import", "import numpy as np import os import pandas as pd", "[list] list of filenames of observation files :param pre_train_file: [str]", "training input and output data 'y_trn_pre': batched, scaled, and centered", "default=\"data/metr-la.h5\", help=\"Raw traffic readings.\",) parser.add_argument(\"--seq_length_x\", type=int, default=12, help=\"Sequence Length.\",) parser.add_argument(\"--seq_length_y\",", "pd import numpy as np import yaml import xarray as", "and end_dates must have same length\") else: raise ValueError(\"start_dates must", "mean of zero :param obs_temper_file: [str] temperature observations file (csv)", "zero :param obs_temper_file: [str] temperature observations file (csv) 
:param obs_flow_file:[str]", "second will be 182-547) :return: [numpy array] batched and reshaped", "ValueError(\"start_dates and end_dates must have same length\") else: raise ValueError(\"start_dates", "= adj.copy() + I D = np.sum(A_hat, axis=1) D_inv =", "df = pd.read_hdf(\"data/metr-la.h5\") seq_length_x = 12 seq_length_y = 12 y_start", "sort the distance matrix by seg_id_nat :return: \"\"\" df =", "[xr dataset] the observations in the same time \"\"\" obs", "ranges :param dataset: [xr dataset] input or output data with", "use the # first first_var = next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name],", "x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\", \"seg_length\", \"seginc_potet\", \"seg_slope\", \"seg_humid\", \"seg_elev\"], y_vars=['seg_tave_water'], primary_variable='temp',", "It can be any in the dataset, but I'll use", "'y_trn_obs_mean': mean of the observation training data [n_out] 'y_obs_tst': un-batched,", "test_start_date, test_end_date, ): \"\"\" separate the train data from the", "data_list.append(dataset.sel(date=date_slice)) return xr.concat(data_list, dim=\"date\") else: raise ValueError(\"start_dates and end_dates must", "variable name. It can be any in the dataset, but", "and a mean of zero :param obs_temper_file: [str] temperature observations", "isinstance(start_dates, list) or isinstance(start_dates, tuple): if len(start_dates) == len(end_dates): data_list", "(either x or y or mask) dims: [nbatch, nseg, len_seq,", "\"y_test\": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period), \"y_vars\": np.array(y_vars), 'period': np.array([period]),", "= scale(y_obs_trn) data = { \"x_train\": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length), \"x_val\":", "If None, all of the variables will be used :param", "or mask) dims: [nbatch, nseg, len_seq, nfeat/nout] :return: reshaped data", "start and end dates. 
This assumes your training data is", "dataset, but I'll use the # first first_var = next(iter(dataset.data_vars.keys()))", "y=data['y_pre_train']) np.savez_compressed(os.path.join(out_file,'train.npz'), x=data['x_train'], y=data['y_obs_train'], ) np.savez_compressed(os.path.join(out_file, 'test.npz'), x=data['x_test'], y=data['y_obs_tst'], )", "= exclude_grp.get(\"end_date\") if end: end = datetime.datetime.strptime(end, \"%Y-%m-%d\") return start,", "file :param log_q: [bool] whether or not to take the", "x_tst = separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date, test_end_date,", "val, test def split_into_batches(data_array, seq_len=365, offset=1): \"\"\" split training data", "'dates_ids_tst: un-batched dates and national seg ids for testing data", ") np.savez_compressed(os.path.join(out_file,'val.npz'), x=data['x_val'], y=data['y_obs_val'], ) ''' np.savez_compressed(os.path.join(out_file,'data.npz'), **data) return data", "\"\"\" if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset): std", "= np.load('data/METR-LA/val.npz') LAtrain['x'].shape LAtrain['y'].shape LAtest['x'].shape LAtest['y'].shape check = np.moveaxis(data['x_train'], [0,1,2,3],", "and standard deviations of the training input and output data", "or \"updown\") :param out_file: :return: [numpy array] processed adjacency matrix", "weird input values check_if_finite(x_data) x_trn, x_val, x_tst = separate_trn_tst( x_data,", "# scale y training data and get the mean and", "'2020-09-30'], val_start_date='2006-10-01', val_end_date='2016-09-30', test_start_date=['1980-10-01', '2020-10-01'], test_end_date=['1985-09-30', '2021-09-30'], x_vars=[\"seg_rain\", \"seg_tave_air\", \"seginc_swrad\",", "and output data 'y_trn_pre': batched, scaled, and centered output data", "row_col_names, sensor_id_to_ind, df #check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx') #if __name__", 
"#catch_prop_file=None, #exclude_file=None, #log_q=False, out_file=None, #segs=None, normalize_y=False, ): \"\"\" prepare input", "date ranges elif isinstance(start_dates, list) or isinstance(start_dates, tuple): if len(start_dates)", "join=\"left\") obs = obs[[\"temp_c\", \"discharge_cms\"]] obs = obs.rename( {\"temp_c\": \"seg_tave_water\",", "val_start_date, val_end_date, test_start_date, test_end_date, ): \"\"\" separate the train data", "!= 0] = 1 / (1 + np.exp(-adj[adj != 0]))", "import unicode_literals import argparse import numpy as np import os", "date(s) to start validation period (can have multiple discontinuos periods)", "x_val, x_tst = separate_trn_tst( x_data, train_start_date, train_end_date, val_start_date, val_end_date, test_start_date,", "\"\"\" sort the distance matrix by seg_id_nat :return: \"\"\" df", "after [nseq, seq_len, nseg, nfeat] #reshaped = reshape_for_training(batched) reshaped =", "next(iter(dataset.data_vars.keys())) coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0] new_var_name = coord_name + \"1\"", "(can have multiple discontinuos periods) :param test_start_date: [str or list]", "if y & np.isfinite(period): reshaped = reshaped[:,-period:,...] return reshaped def" ]
[ "function, string formating 10. Write a Python program to get", "of three given numbers, if the values are equal then", "(syntax, description etc.) of Python built-in function(s). Sample function :", "document. Sample string : a string that you \"don't\" have", "Tools: string formating 4. Write a Python program to calculate", "6. Write a Python program to get the difference between", "where \"Is\" has been added to the front. If the", "math, input function 9. Write a Python program to get", "7, 2), (2014, 7, 11) Expected output : 9 days", "Write a Python program to calculate number of days between", "a given month and year. Tools: Use 'calendar' module. 3.", "the following here document. Sample string : a string that", "volume of a sphere with radius 6. Tools: input function,", "math 6. Write a Python program to get the difference", "difference between a given number and 17, if the number", "number and 17, if the number is greater than 17", "program to calculate the sum of three given numbers, if", "the sum of three given numbers, if the values are", "begins with \"Is\" then return the string unchanged. Tools: input", "new string from a given string where \"Is\" has been", "<reponame>CodedLadiesInnovateTech/python-challenges <<<<<<< HEAD \"\"\" 1. Write a Python program to", "to get the volume of a sphere with radius 6.", "program to calculate number of days between two dates. Sample", "slicing ======= \"\"\" 1. Write a Python program to print", "Python program to print the documents (syntax, description etc.) of", "calculate the sum of three given numbers, if the values", "9. Write a Python program to get a new string", "copies of a given string. Tools: input function, slicing >>>>>>>", "5. Write a Python program to get the volume of", "of days between two dates. 
Sample dates : (2014, 7,", "a Python program to get the difference between a given", "string : a string that you \"don't\" have to escape", "a Python program to test whether a number is within", "(2014, 7, 11) Expected output : 9 days Tools: Datetime", "Python program to calculate number of days between two dates.", "17 return double the absolute difference. Tools: abs function, input", "of Python built-in function(s). Sample function : abs() Expected Result", "get a new string from a given string where \"Is\"", "string unchanged. Tools: input function, string formating 10. Write a", "number is within 100 of 1000 or 2000. Tools: maths,input", "string where \"Is\" has been added to the front. If", "of the argument. Tools: help function 2. Write a Python", "abs() Expected Result : abs(number) -> number Return the absolute", "given numbers, if the values are equal then return three", "documents (syntax, description etc.) of Python built-in function(s). Sample function", "string from a given string where \"Is\" has been added", "heredoc string --------> example Tools: string formating 4. Write a", "given month and year. Tools: Use 'calendar' module. 3. Write", "than 17 return double the absolute difference. Tools: abs function,", "Tools: abs function, input function, math 7. Write a Python", "the values are equal then return three times of their", "Tools: math, input function 9. Write a Python program to", "Python program to get a string which is n (non-negative", "number Return the absolute value of the argument. Tools: help", "calculate number of days between two dates. Sample dates :", "and 17, if the number is greater than 17 return", "return the string unchanged. Tools: input function, string formating 10.", "n (non-negative integer) copies of a given string. Tools: input", "days Tools: Datetime module, timedelta module 5. Write a Python", "function 8. 
Write a Python program to calculate the sum", "number is greater than 17 return double the absolute difference.", "to print the documents (syntax, description etc.) of Python built-in", "absolute value of the argument. Tools: help function 2. Write", "year. Tools: Use 'calendar' module. 3. Write a Python program", "Tools: input function, math 6. Write a Python program to", "HEAD \"\"\" 1. Write a Python program to print the", "program to get the difference between a given number and", "or 2000. Tools: maths,input function 8. Write a Python program", "given string already begins with \"Is\" then return the string", "print the following here document. Sample string : a string", "Write a Python program to print the following here document.", "the volume of a sphere with radius 6. Tools: input", "function, math 7. Write a Python program to test whether", "return three times of their sum. Tools: math, input function", "following here document. Sample string : a string that you", "Python program to calculate the sum of three given numbers,", "if the values are equal then return three times of", "argument. Tools: help function 2. Write a Python program to", "\"\"\" 1. Write a Python program to print the documents", "month and year. Tools: Use 'calendar' module. 3. Write a", "a Python program to get a new string from a", "Tools: maths,input function 8. Write a Python program to calculate", "Tools: input function, string formating 10. Write a Python program", "11) Expected output : 9 days Tools: Datetime module, timedelta", "1000 or 2000. Tools: maths,input function 8. Write a Python", "Write a Python program to get a new string from", "program to print the documents (syntax, description etc.) of Python", "a given number and 17, if the number is greater", "\"Is\" has been added to the front. If the given", "double the absolute difference. Tools: abs function, input function, math", "calendar of a given month and year. 
Tools: Use 'calendar'", "string already begins with \"Is\" then return the string unchanged.", "Python program to get the volume of a sphere with", "string formating 10. Write a Python program to get a", "of a given string. Tools: input function, slicing >>>>>>> f4444ec0d72c645d12694e90df7429456db0611c", "to print the calendar of a given month and year.", "Write a Python program to get a string which is", "unchanged. Tools: input function, string formating 10. Write a Python", "days between two dates. Sample dates : (2014, 7, 2),", "return double the absolute difference. Tools: abs function, input function,", "module 5. Write a Python program to get the volume", "Write a Python program to calculate the sum of three", "Python program to get the difference between a given number", ": abs() Expected Result : abs(number) -> number Return the", "given string where \"Is\" has been added to the front.", "the front. If the given string already begins with \"Is\"", "maths,input function 8. Write a Python program to calculate the", "a given string where \"Is\" has been added to the", "get the volume of a sphere with radius 6. Tools:", "to get a string which is n (non-negative integer) copies", "a number is within 100 of 1000 or 2000. Tools:", "2), (2014, 7, 11) Expected output : 9 days Tools:", "the given string already begins with \"Is\" then return the", "within 100 of 1000 or 2000. Tools: maths,input function 8.", "This is a ....... multi-line heredoc string --------> example Tools:", "a new string from a given string where \"Is\" has", "program to print the calendar of a given month and", "the argument. Tools: help function 2. Write a Python program", "a string that you \"don't\" have to escape This is", "function, slicing ======= \"\"\" 1. Write a Python program to", "function 2. Write a Python program to print the calendar", "values are equal then return three times of their sum.", "(non-negative integer) copies of a given string. Tools: input function,", "1. 
Write a Python program to print the documents (syntax,", "\"don't\" have to escape This is a ....... multi-line heredoc", "function, input function, math 7. Write a Python program to", "to get a new string from a given string where", "multi-line heredoc string --------> example Tools: string formating 4. Write", "Write a Python program to print the calendar of a", "get the difference between a given number and 17, if", "module. 3. Write a Python program to print the following", "greater than 17 return double the absolute difference. Tools: abs", "difference. Tools: abs function, input function, math 7. Write a", "string that you \"don't\" have to escape This is a", "2000. Tools: maths,input function 8. Write a Python program to", "sum of three given numbers, if the values are equal", "equal then return three times of their sum. Tools: math,", "Write a Python program to print the documents (syntax, description", "string --------> example Tools: string formating 4. Write a Python", "input function, math 6. Write a Python program to get", "a string which is n (non-negative integer) copies of a", "program to test whether a number is within 100 of", "are equal then return three times of their sum. Tools:", "If the given string already begins with \"Is\" then return", "9 days Tools: Datetime module, timedelta module 5. Write a", "8. Write a Python program to calculate the sum of", "a ....... multi-line heredoc string --------> example Tools: string formating", ": (2014, 7, 2), (2014, 7, 11) Expected output :", ": abs(number) -> number Return the absolute value of the", "a sphere with radius 6. Tools: input function, math 6.", "program to get the volume of a sphere with radius", "escape This is a ....... multi-line heredoc string --------> example", "to get the difference between a given number and 17,", "Python program to get a new string from a given", "input function, math 7. 
Write a Python program to test", "function : abs() Expected Result : abs(number) -> number Return", "7. Write a Python program to test whether a number", "abs function, input function, math 7. Write a Python program", "added to the front. If the given string already begins", "2. Write a Python program to print the calendar of", "math 7. Write a Python program to test whether a", "string. Tools: input function, slicing ======= \"\"\" 1. Write a", "Sample string : a string that you \"don't\" have to", "copies of a given string. Tools: input function, slicing =======", "with \"Is\" then return the string unchanged. Tools: input function,", "get a string which is n (non-negative integer) copies of", "here document. Sample string : a string that you \"don't\"", "have to escape This is a ....... multi-line heredoc string", "of their sum. Tools: math, input function 9. Write a", "formating 10. Write a Python program to get a string", "number of days between two dates. Sample dates : (2014,", "radius 6. Tools: input function, math 6. Write a Python", "a given string. Tools: input function, slicing >>>>>>> f4444ec0d72c645d12694e90df7429456db0611c \"\"\"", "Write a Python program to test whether a number is", "3. Write a Python program to print the following here", "program to print the following here document. Sample string :", "you \"don't\" have to escape This is a ....... multi-line", "function(s). Sample function : abs() Expected Result : abs(number) ->", "7, 11) Expected output : 9 days Tools: Datetime module,", "of a given month and year. Tools: Use 'calendar' module.", "description etc.) of Python built-in function(s). Sample function : abs()", "program to get a new string from a given string", "module, timedelta module 5. Write a Python program to get", "three given numbers, if the values are equal then return", "--------> example Tools: string formating 4. Write a Python program", "a Python program to get the volume of a sphere", "Tools: help function 2. 
Write a Python program to print", "program to get a string which is n (non-negative integer)", "string which is n (non-negative integer) copies of a given", "Write a Python program to get the difference between a", "and year. Tools: Use 'calendar' module. 3. Write a Python", "integer) copies of a given string. Tools: input function, slicing", "the absolute difference. Tools: abs function, input function, math 7.", "if the number is greater than 17 return double the", "a given string. Tools: input function, slicing ======= \"\"\" 1.", "4. Write a Python program to calculate number of days", "to print the following here document. Sample string : a", "given number and 17, if the number is greater than", "to calculate number of days between two dates. Sample dates", "Tools: Datetime module, timedelta module 5. Write a Python program", "already begins with \"Is\" then return the string unchanged. Tools:", "to calculate the sum of three given numbers, if the", "between two dates. Sample dates : (2014, 7, 2), (2014,", "Python program to test whether a number is within 100", "sphere with radius 6. Tools: input function, math 6. Write", "to test whether a number is within 100 of 1000", "the documents (syntax, description etc.) of Python built-in function(s). Sample", "the calendar of a given month and year. Tools: Use", "Sample dates : (2014, 7, 2), (2014, 7, 11) Expected", "function, math 6. Write a Python program to get the", "between a given number and 17, if the number is", "the number is greater than 17 return double the absolute", "-> number Return the absolute value of the argument. Tools:", "test whether a number is within 100 of 1000 or", "given string. Tools: input function, slicing ======= \"\"\" 1. Write", "17, if the number is greater than 17 return double", "Tools: input function, slicing ======= \"\"\" 1. Write a Python", "input function, slicing ======= \"\"\" 1. Write a Python program", "example Tools: string formating 4. 
Write a Python program to", "Expected Result : abs(number) -> number Return the absolute value", "of 1000 or 2000. Tools: maths,input function 8. Write a", "string formating 4. Write a Python program to calculate number", "a Python program to print the documents (syntax, description etc.)", "print the documents (syntax, description etc.) of Python built-in function(s).", "a Python program to print the calendar of a given", "that you \"don't\" have to escape This is a .......", "help function 2. Write a Python program to print the", "is a ....... multi-line heredoc string --------> example Tools: string", "two dates. Sample dates : (2014, 7, 2), (2014, 7,", "their sum. Tools: math, input function 9. Write a Python", "abs(number) -> number Return the absolute value of the argument.", "sum. Tools: math, input function 9. Write a Python program", ": a string that you \"don't\" have to escape This", "Expected output : 9 days Tools: Datetime module, timedelta module", "output : 9 days Tools: Datetime module, timedelta module 5.", "input function, string formating 10. Write a Python program to", "dates : (2014, 7, 2), (2014, 7, 11) Expected output", "which is n (non-negative integer) copies of a given string.", "100 of 1000 or 2000. Tools: maths,input function 8. Write", "three times of their sum. Tools: math, input function 9.", "been added to the front. If the given string already", "is within 100 of 1000 or 2000. Tools: maths,input function", "a Python program to calculate number of days between two", "from a given string where \"Is\" has been added to", "Return the absolute value of the argument. Tools: help function", "is n (non-negative integer) copies of a given string. Tools:", "to the front. If the given string already begins with", "function 9. Write a Python program to get a new", "front. If the given string already begins with \"Is\" then", "Use 'calendar' module. 3. Write a Python program to print", "timedelta module 5. 
Write a Python program to get the", "print the calendar of a given month and year. Tools:", "Python program to print the calendar of a given month", "etc.) of Python built-in function(s). Sample function : abs() Expected", "of a given string. Tools: input function, slicing ======= \"\"\"", "dates. Sample dates : (2014, 7, 2), (2014, 7, 11)", "'calendar' module. 3. Write a Python program to print the", "a Python program to print the following here document. Sample", "absolute difference. Tools: abs function, input function, math 7. Write", "with radius 6. Tools: input function, math 6. Write a", "the string unchanged. Tools: input function, string formating 10. Write", "10. Write a Python program to get a string which", "value of the argument. Tools: help function 2. Write a", "is greater than 17 return double the absolute difference. Tools:", "6. Tools: input function, math 6. Write a Python program", "the difference between a given number and 17, if the", "whether a number is within 100 of 1000 or 2000.", "(2014, 7, 2), (2014, 7, 11) Expected output : 9", "\"Is\" then return the string unchanged. Tools: input function, string", "a Python program to get a string which is n", "a Python program to calculate the sum of three given", "Tools: Use 'calendar' module. 3. Write a Python program to", "Result : abs(number) -> number Return the absolute value of", "formating 4. Write a Python program to calculate number of", "....... multi-line heredoc string --------> example Tools: string formating 4.", "then return the string unchanged. Tools: input function, string formating", "======= \"\"\" 1. Write a Python program to print the", "<<<<<<< HEAD \"\"\" 1. Write a Python program to print", "built-in function(s). Sample function : abs() Expected Result : abs(number)", "the absolute value of the argument. Tools: help function 2.", "then return three times of their sum. Tools: math, input", "has been added to the front. If the given string", "input function 9. 
Write a Python program to get a", "numbers, if the values are equal then return three times", "Write a Python program to get the volume of a", "Python built-in function(s). Sample function : abs() Expected Result :", "Python program to print the following here document. Sample string", "times of their sum. Tools: math, input function 9. Write", "to escape This is a ....... multi-line heredoc string -------->", "Datetime module, timedelta module 5. Write a Python program to", "Sample function : abs() Expected Result : abs(number) -> number", "of a sphere with radius 6. Tools: input function, math", ": 9 days Tools: Datetime module, timedelta module 5. Write" ]
[ "self.__name__ == 'base': assert bases == (object,) assert kwds ==", "['base', 'derived'] if self.__name__ == 'base': assert bases == (object,)", "{'arg1': False, 'arg2': True} return super().__new__(metacls, name, bases, attributes) def", "aïvázis # orthologue # (c) 1998-2021 all rights reserved #", "all rights reserved # # \"\"\" When a metaclass understands", "super().__new__(metacls, name, bases, attributes) def __init__(self, name, bases, attributes, **kwds):", "class base(object, metaclass=meta, arg1=True, arg2=False): def __init__(self, **kwds): assert type(self).__name__", "'derived'] if self.__name__ == 'base': assert bases == (object,) assert", "assert kwds == {'arg1': False, 'arg2': True} return super().__new__(metacls, name,", "if self.__name__ == 'derived': assert bases == (base,) assert kwds", "# orthologue # (c) 1998-2021 all rights reserved # #", "True, 'arg2': False} if name == 'derived': assert bases ==", "'arg2': True} return super().__new__(metacls, name, bases, attributes) def __init__(self, name,", "== (base,) assert kwds == {'arg1': False, 'arg2': True} super().__init__(name,", "'meta' assert name in ['base', 'derived'] if name == 'base':", "-*- coding: utf-8 -*- # # <NAME>. 
aïvázis # orthologue", "the extra keywords that can be passed during class declaration,", "False, 'arg2': True} return super().__prepare__(name, bases) def __new__(metacls, name, bases,", "self.__name__ in ['base', 'derived'] if self.__name__ == 'base': assert bases", "def __prepare__(metacls, name, bases, **kwds): assert metacls.__name__ == 'meta' assert", "arg1=True, arg2=False): def __init__(self, **kwds): assert type(self).__name__ == 'base' assert", "understands the extra keywords that can be passed during class", "{} return def test(): b = base() d = derived()", "bases, attributes, **kwds): assert self.__name__ in ['base', 'derived'] if self.__name__", "class declaration, it has to override all these to accommodate", "bases, attributes) def __init__(self, name, bases, attributes, **kwds): assert self.__name__", "type(self).__name__ == 'derived' assert kwds == {} return def test():", "kwds == {'arg1': False, 'arg2': True} super().__init__(name, bases, attributes) return", "extra keywords that can be passed during class declaration, it", "'arg2': False} if name == 'derived': assert bases == (base,)", "'derived'] if name == 'base': assert bases == (object,) assert", "True} super().__init__(name, bases, attributes) return class base(object, metaclass=meta, arg1=True, arg2=False):", "class derived(base, arg1=False, arg2=True): def __init__(self, **kwds): assert type(self).__name__ ==", "bases, attributes, **kwds): assert metacls.__name__ == 'meta' assert name in", "it has to override all these to accommodate the change", "# main if __name__ == \"__main__\": test() # end of", "return super().__prepare__(name, bases) def __new__(metacls, name, bases, attributes, **kwds): assert", "bases) def __new__(metacls, name, bases, attributes, **kwds): assert metacls.__name__ ==", "derived(base, arg1=False, arg2=True): def __init__(self, **kwds): assert type(self).__name__ == 'derived'", "has to override all these to accommodate the change in", "assert kwds == {'arg1': False, 
'arg2': True} return super().__prepare__(name, bases)", "'arg2': False} if self.__name__ == 'derived': assert bases == (base,)", "signature \"\"\" class meta(type): @classmethod def __prepare__(metacls, name, bases, **kwds):", "name, bases, attributes) def __init__(self, name, bases, attributes, **kwds): assert", "__init__(self, name, bases, attributes, **kwds): assert self.__name__ in ['base', 'derived']", "in signature \"\"\" class meta(type): @classmethod def __prepare__(metacls, name, bases,", "__prepare__(metacls, name, bases, **kwds): assert metacls.__name__ == 'meta' assert name", "metacls.__name__ == 'meta' assert name in ['base', 'derived'] if name", "__init__(self, **kwds): assert type(self).__name__ == 'derived' assert kwds == {}", "accommodate the change in signature \"\"\" class meta(type): @classmethod def", "@classmethod def __prepare__(metacls, name, bases, **kwds): assert metacls.__name__ == 'meta'", "== {'arg1': False, 'arg2': True} super().__init__(name, bases, attributes) return class", "assert type(self).__name__ == 'base' assert kwds == {} return class", "== {'arg1': True, 'arg2': False} if self.__name__ == 'derived': assert", "'derived' assert kwds == {} return def test(): b =", "test(): b = base() d = derived() return # main", "# # \"\"\" When a metaclass understands the extra keywords", "assert self.__name__ in ['base', 'derived'] if self.__name__ == 'base': assert", "all these to accommodate the change in signature \"\"\" class", "meta(type): @classmethod def __prepare__(metacls, name, bases, **kwds): assert metacls.__name__ ==", "== {} return class derived(base, arg1=False, arg2=True): def __init__(self, **kwds):", "== {'arg1': False, 'arg2': True} return super().__new__(metacls, name, bases, attributes)", "in ['base', 'derived'] if name == 'base': assert bases ==", "reserved # # \"\"\" When a metaclass understands the extra", "in ['base', 'derived'] if self.__name__ == 'base': assert bases ==", "metaclass understands the extra keywords 
that can be passed during", "True, 'arg2': False} if self.__name__ == 'derived': assert bases ==", "derived() return # main if __name__ == \"__main__\": test() #", "\"\"\" When a metaclass understands the extra keywords that can", "bases, **kwds): assert metacls.__name__ == 'meta' assert name in ['base',", "return # main if __name__ == \"__main__\": test() # end", "**kwds): assert metacls.__name__ == 'meta' assert name in ['base', 'derived']", "== (object,) assert kwds == {'arg1': True, 'arg2': False} if", "can be passed during class declaration, it has to override", "that can be passed during class declaration, it has to", "attributes, **kwds): assert metacls.__name__ == 'meta' assert name in ['base',", "return super().__new__(metacls, name, bases, attributes) def __init__(self, name, bases, attributes,", "(c) 1998-2021 all rights reserved # # \"\"\" When a", "kwds == {'arg1': True, 'arg2': False} if self.__name__ == 'derived':", "utf-8 -*- # # <NAME>. aïvázis # orthologue # (c)", "rights reserved # # \"\"\" When a metaclass understands the", "def __new__(metacls, name, bases, attributes, **kwds): assert metacls.__name__ == 'meta'", "name, bases, attributes, **kwds): assert metacls.__name__ == 'meta' assert name", "== 'meta' assert name in ['base', 'derived'] if name ==", "self.__name__ == 'derived': assert bases == (base,) assert kwds ==", "-*- # # <NAME>. aïvázis # orthologue # (c) 1998-2021", "super().__prepare__(name, bases) def __new__(metacls, name, bases, attributes, **kwds): assert metacls.__name__", "assert bases == (base,) assert kwds == {'arg1': False, 'arg2':", "# -*- coding: utf-8 -*- # # <NAME>. 
aïvázis #", "attributes, **kwds): assert self.__name__ in ['base', 'derived'] if self.__name__ ==", "== {'arg1': True, 'arg2': False} if name == 'derived': assert", "When a metaclass understands the extra keywords that can be", "kwds == {'arg1': False, 'arg2': True} return super().__new__(metacls, name, bases,", "arg2=False): def __init__(self, **kwds): assert type(self).__name__ == 'base' assert kwds", "bases == (base,) assert kwds == {'arg1': False, 'arg2': True}", "type(self).__name__ == 'base' assert kwds == {} return class derived(base,", "super().__init__(name, bases, attributes) return class base(object, metaclass=meta, arg1=True, arg2=False): def", "arg1=False, arg2=True): def __init__(self, **kwds): assert type(self).__name__ == 'derived' assert", "= base() d = derived() return # main if __name__", "assert bases == (object,) assert kwds == {'arg1': True, 'arg2':", "coding: utf-8 -*- # # <NAME>. aïvázis # orthologue #", "if name == 'derived': assert bases == (base,) assert kwds", "orthologue # (c) 1998-2021 all rights reserved # # \"\"\"", "'arg2': True} super().__init__(name, bases, attributes) return class base(object, metaclass=meta, arg1=True,", "**kwds): assert type(self).__name__ == 'derived' assert kwds == {} return", "{'arg1': False, 'arg2': True} return super().__prepare__(name, bases) def __new__(metacls, name,", "attributes) return class base(object, metaclass=meta, arg1=True, arg2=False): def __init__(self, **kwds):", "declaration, it has to override all these to accommodate the", "1998-2021 all rights reserved # # \"\"\" When a metaclass", "(base,) assert kwds == {'arg1': False, 'arg2': True} return super().__new__(metacls,", "False, 'arg2': True} super().__init__(name, bases, attributes) return class base(object, metaclass=meta,", "keywords that can be passed during class declaration, it has", "'base' assert kwds == {} return class derived(base, arg1=False, arg2=True):", "= derived() return # main if __name__ == \"__main__\": test()", "change 
in signature \"\"\" class meta(type): @classmethod def __prepare__(metacls, name,", "False, 'arg2': True} return super().__new__(metacls, name, bases, attributes) def __init__(self,", "== {} return def test(): b = base() d =", "name in ['base', 'derived'] if name == 'base': assert bases", "False} if self.__name__ == 'derived': assert bases == (base,) assert", "assert kwds == {'arg1': True, 'arg2': False} if self.__name__ ==", "'derived': assert bases == (base,) assert kwds == {'arg1': False,", "True} return super().__new__(metacls, name, bases, attributes) def __init__(self, name, bases,", "the change in signature \"\"\" class meta(type): @classmethod def __prepare__(metacls,", "assert type(self).__name__ == 'derived' assert kwds == {} return def", "<filename>tests/python/metaclass_inheritance.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # #", "b = base() d = derived() return # main if", "name, bases, **kwds): assert metacls.__name__ == 'meta' assert name in", "{'arg1': False, 'arg2': True} super().__init__(name, bases, attributes) return class base(object,", "['base', 'derived'] if name == 'base': assert bases == (object,)", "main if __name__ == \"__main__\": test() # end of file", "\"\"\" class meta(type): @classmethod def __prepare__(metacls, name, bases, **kwds): assert", "kwds == {} return def test(): b = base() d", "assert kwds == {} return def test(): b = base()", "base(object, metaclass=meta, arg1=True, arg2=False): def __init__(self, **kwds): assert type(self).__name__ ==", "# (c) 1998-2021 all rights reserved # # \"\"\" When", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # <NAME>.", "name == 'derived': assert bases == (base,) assert kwds ==", "if self.__name__ == 'base': assert bases == (object,) assert kwds", "== 'derived' assert kwds == {} return def test(): b", "bases == (object,) assert kwds == {'arg1': True, 'arg2': False}", "{} return class derived(base, arg1=False, arg2=True): def __init__(self, **kwds): assert", "False} if name == 
'derived': assert bases == (base,) assert", "to override all these to accommodate the change in signature", "== 'base' assert kwds == {} return class derived(base, arg1=False,", "assert kwds == {} return class derived(base, arg1=False, arg2=True): def", "(base,) assert kwds == {'arg1': False, 'arg2': True} super().__init__(name, bases,", "**kwds): assert type(self).__name__ == 'base' assert kwds == {} return", "passed during class declaration, it has to override all these", "python3 # -*- coding: utf-8 -*- # # <NAME>. aïvázis", "assert kwds == {'arg1': True, 'arg2': False} if name ==", "(object,) assert kwds == {'arg1': True, 'arg2': False} if name", "kwds == {} return class derived(base, arg1=False, arg2=True): def __init__(self,", "name, bases, attributes, **kwds): assert self.__name__ in ['base', 'derived'] if", "assert metacls.__name__ == 'meta' assert name in ['base', 'derived'] if", "metaclass=meta, arg1=True, arg2=False): def __init__(self, **kwds): assert type(self).__name__ == 'base'", "== 'derived': assert bases == (base,) assert kwds == {'arg1':", "True} return super().__prepare__(name, bases) def __new__(metacls, name, bases, attributes, **kwds):", "'arg2': True} return super().__prepare__(name, bases) def __new__(metacls, name, bases, attributes,", "def __init__(self, name, bases, attributes, **kwds): assert self.__name__ in ['base',", "assert name in ['base', 'derived'] if name == 'base': assert", "kwds == {'arg1': False, 'arg2': True} return super().__prepare__(name, bases) def", "def test(): b = base() d = derived() return #", "# # <NAME>. 
aïvázis # orthologue # (c) 1998-2021 all", "attributes) def __init__(self, name, bases, attributes, **kwds): assert self.__name__ in", "__new__(metacls, name, bases, attributes, **kwds): assert metacls.__name__ == 'meta' assert", "**kwds): assert self.__name__ in ['base', 'derived'] if self.__name__ == 'base':", "bases, attributes) return class base(object, metaclass=meta, arg1=True, arg2=False): def __init__(self,", "== 'base': assert bases == (object,) assert kwds == {'arg1':", "kwds == {'arg1': True, 'arg2': False} if name == 'derived':", "# <NAME>. aïvázis # orthologue # (c) 1998-2021 all rights", "to accommodate the change in signature \"\"\" class meta(type): @classmethod", "these to accommodate the change in signature \"\"\" class meta(type):", "a metaclass understands the extra keywords that can be passed", "assert kwds == {'arg1': False, 'arg2': True} super().__init__(name, bases, attributes)", "d = derived() return # main if __name__ == \"__main__\":", "def __init__(self, **kwds): assert type(self).__name__ == 'derived' assert kwds ==", "(object,) assert kwds == {'arg1': True, 'arg2': False} if self.__name__", "__init__(self, **kwds): assert type(self).__name__ == 'base' assert kwds == {}", "return def test(): b = base() d = derived() return", "base() d = derived() return # main if __name__ ==", "if name == 'base': assert bases == (object,) assert kwds", "return class base(object, metaclass=meta, arg1=True, arg2=False): def __init__(self, **kwds): assert", "{'arg1': True, 'arg2': False} if name == 'derived': assert bases", "{'arg1': True, 'arg2': False} if self.__name__ == 'derived': assert bases", "(base,) assert kwds == {'arg1': False, 'arg2': True} return super().__prepare__(name,", "<NAME>. 
aïvázis # orthologue # (c) 1998-2021 all rights reserved", "def __init__(self, **kwds): assert type(self).__name__ == 'base' assert kwds ==", "'base': assert bases == (object,) assert kwds == {'arg1': True,", "# \"\"\" When a metaclass understands the extra keywords that", "return class derived(base, arg1=False, arg2=True): def __init__(self, **kwds): assert type(self).__name__", "arg2=True): def __init__(self, **kwds): assert type(self).__name__ == 'derived' assert kwds", "be passed during class declaration, it has to override all", "== (base,) assert kwds == {'arg1': False, 'arg2': True} return", "override all these to accommodate the change in signature \"\"\"", "== {'arg1': False, 'arg2': True} return super().__prepare__(name, bases) def __new__(metacls,", "class meta(type): @classmethod def __prepare__(metacls, name, bases, **kwds): assert metacls.__name__", "name == 'base': assert bases == (object,) assert kwds ==", "during class declaration, it has to override all these to" ]
[ "(b1-b2)**2) def chroma(img, key, threshold): w, h = img.size() for", "c1 r2, g2, b2 = c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2", "b2 = c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2) def", "= img.size() for y in range(h): for x in range(w):", "x in range(w): p = img.get(x, y) if dist(p, key)", "img.get(x, y) if dist(p, key) < threshold: img.set(x, y, Color.yellow)", "w, h = img.size() for y in range(h): for x", "def chroma(img, key, threshold): w, h = img.size() for y", "for x in range(w): p = img.get(x, y) if dist(p,", "+ (g1-g2)**2 + (b1-b2)**2) def chroma(img, key, threshold): w, h", "c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2) def chroma(img, key,", "+ (b1-b2)**2) def chroma(img, key, threshold): w, h = img.size()", "import * import math def dist(c1, c2): r1, g1, b1", "dist(p, key) < threshold: img.set(x, y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\")", "g1, b1 = c1 r2, g2, b2 = c2 return", "threshold): w, h = img.size() for y in range(h): for", "dist(c1, c2): r1, g1, b1 = c1 r2, g2, b2", "c2): r1, g1, b1 = c1 r2, g2, b2 =", "= c1 r2, g2, b2 = c2 return math.sqrt((r1-r2)**2 +", "= c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2) def chroma(img,", "* import math def dist(c1, c2): r1, g1, b1 =", "img.set(x, y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue, (41, 75, 146),", "y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue, (41, 75, 146), 70)", "import math def dist(c1, c2): r1, g1, b1 = c1", "y in range(h): for x in range(w): p = img.get(x,", "key) < threshold: img.set(x, y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue,", "b1 = c1 r2, g2, b2 = c2 return math.sqrt((r1-r2)**2", "from cs1media import * import math def dist(c1, c2): r1,", "cs1media import * import math def dist(c1, c2): r1, g1,", "threshold: img.set(x, y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue, (41, 75,", "range(w): p = 
img.get(x, y) if dist(p, key) < threshold:", "def dist(c1, c2): r1, g1, b1 = c1 r2, g2,", "r2, g2, b2 = c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2 +", "math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2) def chroma(img, key, threshold): w,", "if dist(p, key) < threshold: img.set(x, y, Color.yellow) statue =", "math def dist(c1, c2): r1, g1, b1 = c1 r2,", "range(h): for x in range(w): p = img.get(x, y) if", "p = img.get(x, y) if dist(p, key) < threshold: img.set(x,", "r1, g1, b1 = c1 r2, g2, b2 = c2", "< threshold: img.set(x, y, Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue, (41,", "return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2) def chroma(img, key, threshold):", "key, threshold): w, h = img.size() for y in range(h):", "h = img.size() for y in range(h): for x in", "chroma(img, key, threshold): w, h = img.size() for y in", "for y in range(h): for x in range(w): p =", "(g1-g2)**2 + (b1-b2)**2) def chroma(img, key, threshold): w, h =", "in range(w): p = img.get(x, y) if dist(p, key) <", "= img.get(x, y) if dist(p, key) < threshold: img.set(x, y,", "g2, b2 = c2 return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)", "img.size() for y in range(h): for x in range(w): p", "y) if dist(p, key) < threshold: img.set(x, y, Color.yellow) statue", "in range(h): for x in range(w): p = img.get(x, y)", "Color.yellow) statue = load_picture(\"photos/statue1.jpg\") chroma(statue, (41, 75, 146), 70) statue.show()" ]
[ "# COMPUTE THE LIGHT CURVE # print(\"Computing light curve...\") time", "spots at any one time was %d.' % nspot) return", "uniform distribution of spot longitudes lon = scipy.rand(nspot_tot) * 2", "scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2) area_tot += area", "+ lon[i] phase0 = 2 * scipy.pi * time /", "/ scale_fac / ff res0 = scipy.array([nspot_eff.mean(), ff, amp_eff]) res1", "period[i] + lon[i] phase0 = 2 * scipy.pi * time", "was %.4f.' % ff) # print('Desired amplitude was %.4f, actual", "(amp, amp_eff)) # print('Desired number of spots at any one", "print('Used %d spots in total over %d rotation periods.' %", "Flux dF_tot -= area * mu dF_tot0 -= area *", "amax[i] else: area = amax[i] * \\ scipy.exp(-(time - pk[i])**2", "scipy.ones(nspot_tot) * tau # uniform distribution of spot peak times", "-= area * mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff = area_tot", "and polar rotation period (unit of time is equatorial rotation", "* time / period[i] + lon[i] phase0 = 2 *", "scipy.ones(nspot_tot) * ff * scale_fac # all spots have the", "diffrot = fractional difference between equatorial and polar rotation period", "periods.' % (nspot_tot, dur)) # print('Mean filling factor of individual", "decay = scipy.ones(nspot_tot) * tau # uniform distribution of spot", "extra) - extra # COMPUTE THE LIGHT CURVE # print(\"Computing", "- extra # COMPUTE THE LIGHT CURVE # print(\"Computing light", "range(nspot_tot): # Spot area if (pk[i] == 0) + (decay[i]", "* ff * scale_fac # all spots have the evolution", "period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 )", "* scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase) mu0", "decay[i]**2) area_tot += area # Fore-shortening phase = 2 *", "simplified version of the class-based routines in spot_model.py. 
It generates", ") * p period0 = scipy.ones(nspot_tot) * p # all", "import scipy.io import pylab import numpy import glob import pyfits", "is equatorial rotation period)''' # print('Period = ', p) dur", "= amp / scipy.sqrt(nspot) scale_fac = 1 amax = scipy.ones(nspot_tot)", "# spot rotation rate optionally depends on latitude period =", "time res1[1,:] = area_tot res1[2,:] = dF_tot res1[3,:] = dF_tot0", "mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot = 0. '''", "p=10.0): diffrot = 0. ''' This is a simplified version", "spot rotation rate optionally depends on latitude period = ((scipy.sin(lat)", "amplitude tau = characteristic spot life-time diffrot = fractional difference", "spots was %.4f.' % ff) # print('Desired amplitude was %.4f,", "2 / tau) # uniform distribution of spot longitudes lon", "amax = scipy.ones(nspot_tot) * ff * scale_fac # all spots", "scale_fac = 1 amax = scipy.ones(nspot_tot) * ff * scale_fac", "light curves for dark, point like spots with no limb-darkening.", "It generates a light curves for dark, point like spots", "spot peak times # start well before and end well", "numpy as np import scipy import scipy.io import pylab import", "any one time amp = desired light curve amplitude tau", "times # start well before and end well after time-series", "spot life-time diffrot = fractional difference between equatorial and polar", "dur / 2 / tau) # uniform distribution of spot", "3 * decay.max() pk = scipy.rand(nspot_tot) * (dur + 2", "(crude estimate of) total number of spots needed during entire", "extra # COMPUTE THE LIGHT CURVE # print(\"Computing light curve...\")", "# print('Desired amplitude was %.4f, actual amplitude was %.4f.' \\", "scipy.pi # distribution of spot latitudes uniform in sin(latitude) lat", "area * mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff = area_tot /", "of spots at any one time was %d.' 
% nspot)", "between equatorial and polar rotation period (unit of time is", "total number of spots needed during entire # time-series nspot_tot", "(max(t) - min(t)) # (crude estimate of) total number of", "# uniform distribution of spot peak times # start well", "(dur + 2 * extra) - extra # COMPUTE THE", "% (amp, amp_eff)) # print('Desired number of spots at any", "period0[i] + lon[i] mu = scipy.cos(incl) * scipy.sin(lat[i]) + \\", "* decay.max() pk = scipy.rand(nspot_tot) * (dur + 2 *", "= desired number of spots present on star at any", "+ (decay[i] == 0): area = scipy.ones_like(time) * amax[i] else:", "amp_eff = dF_tot.max()-dF_tot.min() nspot_eff = area_tot / scale_fac / ff", "scale_fac / ff res0 = scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 =", "* scipy.pi # distribution of spot latitudes uniform in sin(latitude)", "estimate of) total number of spots needed during entire #", "factor needed per spot ff = amp / scipy.sqrt(nspot) scale_fac", "rotation period (unit of time is equatorial rotation period)''' #", "= scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) # add", "= (max(t) - min(t)) # (crude estimate of) total number", "0): area = scipy.ones_like(time) * amax[i] else: area = amax[i]", "timescale decay = scipy.ones(nspot_tot) * tau # uniform distribution of", "of the class-based routines in spot_model.py. It generates a light", "< 0] = 0.0 mu0[mu0 < 0] = 0.0 #", "= 0.0 # Flux dF_tot -= area * mu dF_tot0", "# print('Period = ', p) dur = (max(t) - min(t))", "== 0) + (decay[i] == 0): area = scipy.ones_like(time) *", "= 2 * scipy.pi * time / period[i] + lon[i]", "p) dur = (max(t) - min(t)) # (crude estimate of)", "at any one time amp = desired light curve amplitude", "%d spots in total over %d rotation periods.' 
% (nspot_tot,", "spots for i in range(nspot_tot): # Spot area if (pk[i]", "+ \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase) mu0 = scipy.cos(incl)", "amp_eff]) res1 = scipy.zeros((4, len(time))) res1[0,:] = time res1[1,:] =", "* \\ scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2) area_tot", "import numpy as np import scipy import scipy.io import pylab", "uniform distribution of spot peak times # start well before", "dur = (max(t) - min(t)) # (crude estimate of) total", "was %.4f, actual amplitude was %.4f.' \\ # % (amp,", "- min(t)) # (crude estimate of) total number of spots", "tau=30.5, p=10.0): diffrot = 0. ''' This is a simplified", "= numpy.array(t - min(t)) area_tot = scipy.zeros_like(time) dF_tot = scipy.zeros_like(time)", "* (dur + 2 * extra) - extra # COMPUTE", "mu[mu < 0] = 0.0 mu0[mu0 < 0] = 0.0", "spots needed during entire # time-series nspot_tot = int(nspot *", "pylab import numpy import glob import pyfits def mklc(t, nspot=200,", "peak times # start well before and end well after", "end well after time-series limits (to # avoid edge effects)", "spots have the same maximum area # (crude estimate of)", "* scale_fac # all spots have the evolution timescale decay", "glob import pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0):", "life-time diffrot = fractional difference between equatorial and polar rotation", "area_tot / scale_fac / ff res0 = scipy.array([nspot_eff.mean(), ff, amp_eff])", "area = amax[i] * \\ scipy.exp(-(time - pk[i])**2 / 2.", "area_tot += area # Fore-shortening phase = 2 * scipy.pi", "equatorial and polar rotation period (unit of time is equatorial", "res1[3,:] = dF_tot0 # print('Used %d spots in total over", "spots present on star at any one time amp =", "time-series limits (to # avoid edge effects) extra = 3", "with no limb-darkening. 
Parameters: nspot = desired number of spots", "= area_tot res1[2,:] = dF_tot res1[3,:] = dF_tot0 # print('Used", "This is a simplified version of the class-based routines in", "a simplified version of the class-based routines in spot_model.py. It", "latitude period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0", "add up the contributions of individual spots for i in", "* p # all spots have the same maximum area", "limb-darkening. Parameters: nspot = desired number of spots present on", "ff) # print('Desired amplitude was %.4f, actual amplitude was %.4f.'", "= scipy.ones(nspot_tot) * p # all spots have the same", "ff = amp / scipy.sqrt(nspot) scale_fac = 1 amax =", "= 0. ''' This is a simplified version of the", "scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase) mu0 =", "import pylab import numpy import glob import pyfits def mklc(t,", "numpy import glob import pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1.,", "spots have the evolution timescale decay = scipy.ones(nspot_tot) * tau", "+ \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu < 0]", "time = numpy.array(t - min(t)) area_tot = scipy.zeros_like(time) dF_tot =", "res1[0,:] = time res1[1,:] = area_tot res1[2,:] = dF_tot res1[3,:]", "2 * extra) - extra # COMPUTE THE LIGHT CURVE", "polar rotation period (unit of time is equatorial rotation period)'''", "= characteristic spot life-time diffrot = fractional difference between equatorial", "* 2 * scipy.pi # distribution of spot latitudes uniform", "import scipy import scipy.io import pylab import numpy import glob", "diffrot + 1.0 ) * p period0 = scipy.ones(nspot_tot) *", "scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu <", "0.0 mu0[mu0 < 0] = 0.0 # Flux dF_tot -=", "CURVE # print(\"Computing light curve...\") time = numpy.array(t - min(t))", "1 amax = scipy.ones(nspot_tot) * ff * scale_fac # all", "period (unit of time is equatorial rotation period)''' # print('Period", 
"filling factor of individual spots was %.4f.' % ff) #", "amplitude was %.4f.' \\ # % (amp, amp_eff)) # print('Desired", "dark, point like spots with no limb-darkening. Parameters: nspot =", "amplitude was %.4f, actual amplitude was %.4f.' \\ # %", "decay.max() pk = scipy.rand(nspot_tot) * (dur + 2 * extra)", "* time / period0[i] + lon[i] mu = scipy.cos(incl) *", "longitudes lon = scipy.rand(nspot_tot) * 2 * scipy.pi # distribution", "%d rotation periods.' % (nspot_tot, dur)) # print('Mean filling factor", "no limb-darkening. Parameters: nspot = desired number of spots present", "generates a light curves for dark, point like spots with", "(unit of time is equatorial rotation period)''' # print('Period =", "= scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) # add up the contributions", "= 1 amax = scipy.ones(nspot_tot) * ff * scale_fac #", "estimate of) filling factor needed per spot ff = amp", "0.5) * diffrot + 1.0 ) * p period0 =", "dF_tot0 -= area * mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff =", "and end well after time-series limits (to # avoid edge", "* scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu", "time / period0[i] + lon[i] mu = scipy.cos(incl) * scipy.sin(lat[i])", "p # all spots have the same maximum area #", "spot_model.py. It generates a light curves for dark, point like", "* mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff = area_tot / scale_fac", "* amax[i] else: area = amax[i] * \\ scipy.exp(-(time -", "scipy.zeros((4, len(time))) res1[0,:] = time res1[1,:] = area_tot res1[2,:] =", "fractional difference between equatorial and polar rotation period (unit of", "# print('Mean filling factor of individual spots was %.4f.' %", "of spot longitudes lon = scipy.rand(nspot_tot) * 2 * scipy.pi", "tau = characteristic spot life-time diffrot = fractional difference between", "rotation rate optionally depends on latitude period = ((scipy.sin(lat) -", "print('Mean filling factor of individual spots was %.4f.' 
% ff)", "= time res1[1,:] = area_tot res1[2,:] = dF_tot res1[3,:] =", "area * mu dF_tot0 -= area * mu0 amp_eff =", "# Spot area if (pk[i] == 0) + (decay[i] ==", "scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 = scipy.zeros((4, len(time))) res1[0,:] = time", "one time amp = desired light curve amplitude tau =", "scipy.sqrt(nspot) scale_fac = 1 amax = scipy.ones(nspot_tot) * ff *", "phase = 2 * scipy.pi * time / period[i] +", "= scipy.ones(nspot_tot) * tau # uniform distribution of spot peak", "LIGHT CURVE # print(\"Computing light curve...\") time = numpy.array(t -", "- pk[i])**2 / 2. / decay[i]**2) area_tot += area #", "/ period0[i] + lon[i] mu = scipy.cos(incl) * scipy.sin(lat[i]) +", "scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate optionally depends on latitude period", "dF_tot0 # print('Used %d spots in total over %d rotation", "print('Desired amplitude was %.4f, actual amplitude was %.4f.' \\ #", "spot longitudes lon = scipy.rand(nspot_tot) * 2 * scipy.pi #", "filling factor needed per spot ff = amp / scipy.sqrt(nspot)", "2. / decay[i]**2) area_tot += area # Fore-shortening phase =", "for i in range(nspot_tot): # Spot area if (pk[i] ==", "0.0 # Flux dF_tot -= area * mu dF_tot0 -=", "scipy.io import pylab import numpy import glob import pyfits def", "* dur / 2 / tau) # uniform distribution of", "amp = desired light curve amplitude tau = characteristic spot", "scipy import scipy.io import pylab import numpy import glob import", "edge effects) extra = 3 * decay.max() pk = scipy.rand(nspot_tot)", "Spot area if (pk[i] == 0) + (decay[i] == 0):", "amp=1., tau=30.5, p=10.0): diffrot = 0. 
''' This is a", "1.0 ) * p period0 = scipy.ones(nspot_tot) * p #", "int(nspot * dur / 2 / tau) # uniform distribution", "as np import scipy import scipy.io import pylab import numpy", "dur)) # print('Mean filling factor of individual spots was %.4f.'", "uniform in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate", "area_tot = scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) #", "scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)", "evolution timescale decay = scipy.ones(nspot_tot) * tau # uniform distribution", "scipy.rand(nspot_tot) * (dur + 2 * extra) - extra #", "THE LIGHT CURVE # print(\"Computing light curve...\") time = numpy.array(t", "/ tau) # uniform distribution of spot longitudes lon =", "individual spots was %.4f.' % ff) # print('Desired amplitude was", "= dF_tot res1[3,:] = dF_tot0 # print('Used %d spots in", "distribution of spot latitudes uniform in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot))", "dF_tot.max()-dF_tot.min() nspot_eff = area_tot / scale_fac / ff res0 =", "area # Fore-shortening phase = 2 * scipy.pi * time", "any one time was %d.' % nspot) return res0, res1", "number of spots needed during entire # time-series nspot_tot =", "routines in spot_model.py. 
It generates a light curves for dark,", "period)''' # print('Period = ', p) dur = (max(t) -", "* scipy.pi * time / period0[i] + lon[i] mu =", "equatorial rotation period)''' # print('Period = ', p) dur =", "the contributions of individual spots for i in range(nspot_tot): #", "else: area = amax[i] * \\ scipy.exp(-(time - pk[i])**2 /", "present on star at any one time amp = desired", "res1 = scipy.zeros((4, len(time))) res1[0,:] = time res1[1,:] = area_tot", "/ ff res0 = scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 = scipy.zeros((4,", "of spots present on star at any one time amp", "pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot =", "like spots with no limb-darkening. Parameters: nspot = desired number", "min(t)) area_tot = scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time)", "ff res0 = scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 = scipy.zeros((4, len(time)))", "\\ scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2) area_tot +=", "# (crude estimate of) total number of spots needed during", "phase0 = 2 * scipy.pi * time / period0[i] +", "before and end well after time-series limits (to # avoid", "/ 2 / tau) # uniform distribution of spot longitudes", "# distribution of spot latitudes uniform in sin(latitude) lat =", "needed per spot ff = amp / scipy.sqrt(nspot) scale_fac =", "number of spots at any one time was %d.' %", "= scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate optionally depends on latitude", "mu dF_tot0 -= area * mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff", "= fractional difference between equatorial and polar rotation period (unit", "for dark, point like spots with no limb-darkening. 
Parameters: nspot", "if (pk[i] == 0) + (decay[i] == 0): area =", "dF_tot = scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) # add up the", "- min(t)) area_tot = scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0 =", "lon[i] phase0 = 2 * scipy.pi * time / period0[i]", "scale_fac # all spots have the evolution timescale decay =", "+= area # Fore-shortening phase = 2 * scipy.pi *", "# print('Used %d spots in total over %d rotation periods.'", "# uniform distribution of spot longitudes lon = scipy.rand(nspot_tot) *", "0. ''' This is a simplified version of the class-based", "the class-based routines in spot_model.py. It generates a light curves", "rate optionally depends on latitude period = ((scipy.sin(lat) - 0.5)", "contributions of individual spots for i in range(nspot_tot): # Spot", "spot latitudes uniform in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot", "curves for dark, point like spots with no limb-darkening. Parameters:", "import glob import pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5,", "version of the class-based routines in spot_model.py. It generates a", "light curve amplitude tau = characteristic spot life-time diffrot =", "= dF_tot.max()-dF_tot.min() nspot_eff = area_tot / scale_fac / ff res0", "scipy.pi * time / period0[i] + lon[i] mu = scipy.cos(incl)", "at any one time was %d.' % nspot) return res0,", "time amp = desired light curve amplitude tau = characteristic", "* scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu < 0] = 0.0 mu0[mu0", "# start well before and end well after time-series limits", "', p) dur = (max(t) - min(t)) # (crude estimate", "actual amplitude was %.4f.' \\ # % (amp, amp_eff)) #", "spots in total over %d rotation periods.' % (nspot_tot, dur))", "after time-series limits (to # avoid edge effects) extra =", "amax[i] * \\ scipy.exp(-(time - pk[i])**2 / 2. 
/ decay[i]**2)", "min(t)) # (crude estimate of) total number of spots needed", "= area_tot / scale_fac / ff res0 = scipy.array([nspot_eff.mean(), ff,", "# % (amp, amp_eff)) # print('Desired number of spots at", "print('Desired number of spots at any one time was %d.'", "the evolution timescale decay = scipy.ones(nspot_tot) * tau # uniform", "curve amplitude tau = characteristic spot life-time diffrot = fractional", "= scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 = scipy.zeros((4, len(time))) res1[0,:] =", "= scipy.rand(nspot_tot) * 2 * scipy.pi # distribution of spot", "(nspot_tot, dur)) # print('Mean filling factor of individual spots was", "= 0.0 mu0[mu0 < 0] = 0.0 # Flux dF_tot", "of individual spots for i in range(nspot_tot): # Spot area", "+ 1.0 ) * p period0 = scipy.ones(nspot_tot) * p", "mu = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i])", "\\ # % (amp, amp_eff)) # print('Desired number of spots", "res1[1,:] = area_tot res1[2,:] = dF_tot res1[3,:] = dF_tot0 #", "(pk[i] == 0) + (decay[i] == 0): area = scipy.ones_like(time)", "scipy.cos(lat[i]) * scipy.cos(phase) mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \\", "(crude estimate of) filling factor needed per spot ff =", "len(time))) res1[0,:] = time res1[1,:] = area_tot res1[2,:] = dF_tot", "total over %d rotation periods.' % (nspot_tot, dur)) # print('Mean", "of spots needed during entire # time-series nspot_tot = int(nspot", "time-series nspot_tot = int(nspot * dur / 2 / tau)", "2 * scipy.pi * time / period[i] + lon[i] phase0", "%.4f, actual amplitude was %.4f.' \\ # % (amp, amp_eff))", "area # (crude estimate of) filling factor needed per spot", "(decay[i] == 0): area = scipy.ones_like(time) * amax[i] else: area", "area_tot res1[2,:] = dF_tot res1[3,:] = dF_tot0 # print('Used %d", "was %.4f.' 
\\ # % (amp, amp_eff)) # print('Desired number", "# print(\"Computing light curve...\") time = numpy.array(t - min(t)) area_tot", "characteristic spot life-time diffrot = fractional difference between equatorial and", "= scipy.ones(nspot_tot) * ff * scale_fac # all spots have", "nspot_tot = int(nspot * dur / 2 / tau) #", "-= area * mu dF_tot0 -= area * mu0 amp_eff", "scipy.ones(nspot_tot) * p # all spots have the same maximum", "scipy.ones_like(time) * amax[i] else: area = amax[i] * \\ scipy.exp(-(time", "scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) # add up the contributions of", "# print('Desired number of spots at any one time was", "* tau # uniform distribution of spot peak times #", "scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu < 0] = 0.0", "= ', p) dur = (max(t) - min(t)) # (crude", "0] = 0.0 # Flux dF_tot -= area * mu", "2 * scipy.pi * time / period0[i] + lon[i] mu", "needed during entire # time-series nspot_tot = int(nspot * dur", "% (nspot_tot, dur)) # print('Mean filling factor of individual spots", "period0 = scipy.ones(nspot_tot) * p # all spots have the", "area if (pk[i] == 0) + (decay[i] == 0): area", "lon[i] mu = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) *", "* extra) - extra # COMPUTE THE LIGHT CURVE #", "desired number of spots present on star at any one", "scipy.cos(phase) mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) *", "= scipy.zeros((4, len(time))) res1[0,:] = time res1[1,:] = area_tot res1[2,:]", "spots with no limb-darkening. 
Parameters: nspot = desired number of", "/ scipy.sqrt(nspot) scale_fac = 1 amax = scipy.ones(nspot_tot) * ff", "lon = scipy.rand(nspot_tot) * 2 * scipy.pi # distribution of", "/ period[i] + lon[i] phase0 = 2 * scipy.pi *", "* scipy.pi * time / period[i] + lon[i] phase0 =", "distribution of spot longitudes lon = scipy.rand(nspot_tot) * 2 *", "distribution of spot peak times # start well before and", "== 0): area = scipy.ones_like(time) * amax[i] else: area =", "* scipy.cos(phase) mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl)", "in spot_model.py. It generates a light curves for dark, point", "nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot = 0. ''' This", "a light curves for dark, point like spots with no", "scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu < 0] = 0.0 mu0[mu0 <", "amp_eff)) # print('Desired number of spots at any one time", "of time is equatorial rotation period)''' # print('Period = ',", "# Flux dF_tot -= area * mu dF_tot0 -= area", "the same maximum area # (crude estimate of) filling factor", "start well before and end well after time-series limits (to", "+ 2 * extra) - extra # COMPUTE THE LIGHT", "COMPUTE THE LIGHT CURVE # print(\"Computing light curve...\") time =", "2 * scipy.pi # distribution of spot latitudes uniform in", "import pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot", "desired light curve amplitude tau = characteristic spot life-time diffrot", "/ 2. / decay[i]**2) area_tot += area # Fore-shortening phase", "lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate optionally depends on", "= int(nspot * dur / 2 / tau) # uniform", "res1[2,:] = dF_tot res1[3,:] = dF_tot0 # print('Used %d spots", "in total over %d rotation periods.' % (nspot_tot, dur)) #", "over %d rotation periods.' % (nspot_tot, dur)) # print('Mean filling", "%.4f.' 
\\ # % (amp, amp_eff)) # print('Desired number of", "same maximum area # (crude estimate of) filling factor needed", "in range(nspot_tot): # Spot area if (pk[i] == 0) +", "curve...\") time = numpy.array(t - min(t)) area_tot = scipy.zeros_like(time) dF_tot", "# all spots have the same maximum area # (crude", "\\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0) mu[mu < 0] =", "nspot_eff = area_tot / scale_fac / ff res0 = scipy.array([nspot_eff.mean(),", "of spot peak times # start well before and end", "amp / scipy.sqrt(nspot) scale_fac = 1 amax = scipy.ones(nspot_tot) *", "= ((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) *", "nspot = desired number of spots present on star at", "pk[i])**2 / 2. / decay[i]**2) area_tot += area # Fore-shortening", "star at any one time amp = desired light curve", "i in range(nspot_tot): # Spot area if (pk[i] == 0)", "scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)", "is a simplified version of the class-based routines in spot_model.py.", "depends on latitude period = ((scipy.sin(lat) - 0.5) * diffrot", "0) + (decay[i] == 0): area = scipy.ones_like(time) * amax[i]", "limits (to # avoid edge effects) extra = 3 *", "ff, amp_eff]) res1 = scipy.zeros((4, len(time))) res1[0,:] = time res1[1,:]", "sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate optionally depends", "point like spots with no limb-darkening. 
Parameters: nspot = desired", "% ff) # print('Desired amplitude was %.4f, actual amplitude was", "# all spots have the evolution timescale decay = scipy.ones(nspot_tot)", "time is equatorial rotation period)''' # print('Period = ', p)", "* diffrot + 1.0 ) * p period0 = scipy.ones(nspot_tot)", "print('Period = ', p) dur = (max(t) - min(t)) #", "Parameters: nspot = desired number of spots present on star", "maximum area # (crude estimate of) filling factor needed per", "Fore-shortening phase = 2 * scipy.pi * time / period[i]", "in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation rate optionally", "# add up the contributions of individual spots for i", "incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot = 0. ''' This is", "avoid edge effects) extra = 3 * decay.max() pk =", "entire # time-series nspot_tot = int(nspot * dur / 2", "np import scipy import scipy.io import pylab import numpy import", "dF_tot res1[3,:] = dF_tot0 # print('Used %d spots in total", "# avoid edge effects) extra = 3 * decay.max() pk", "* scipy.cos(lat[i]) * scipy.cos(phase) mu0 = scipy.cos(incl) * scipy.sin(lat[i]) +", "effects) extra = 3 * decay.max() pk = scipy.rand(nspot_tot) *", "import numpy import glob import pyfits def mklc(t, nspot=200, incl=(scipy.pi)*5./12.,", "tau) # uniform distribution of spot longitudes lon = scipy.rand(nspot_tot)", "rotation periods.' 
% (nspot_tot, dur)) # print('Mean filling factor of", "pk = scipy.rand(nspot_tot) * (dur + 2 * extra) -", "all spots have the same maximum area # (crude estimate", "((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) * p", "ff * scale_fac # all spots have the evolution timescale", "area = scipy.ones_like(time) * amax[i] else: area = amax[i] *", "all spots have the evolution timescale decay = scipy.ones(nspot_tot) *", "= scipy.rand(nspot_tot) * (dur + 2 * extra) - extra", "have the same maximum area # (crude estimate of) filling", "mu0 amp_eff = dF_tot.max()-dF_tot.min() nspot_eff = area_tot / scale_fac /", "extra = 3 * decay.max() pk = scipy.rand(nspot_tot) * (dur", "= 3 * decay.max() pk = scipy.rand(nspot_tot) * (dur +", "dF_tot0 = scipy.zeros_like(time) # add up the contributions of individual", "= scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i]) *", "%.4f.' % ff) # print('Desired amplitude was %.4f, actual amplitude", "/ decay[i]**2) area_tot += area # Fore-shortening phase = 2", "(to # avoid edge effects) extra = 3 * decay.max()", "def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0): diffrot = 0.", "* scipy.cos(phase0) mu[mu < 0] = 0.0 mu0[mu0 < 0]", "rotation period)''' # print('Period = ', p) dur = (max(t)", "* mu dF_tot0 -= area * mu0 amp_eff = dF_tot.max()-dF_tot.min()", "tau # uniform distribution of spot peak times # start", "scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0 = scipy.zeros_like(time) # add up", "time / period[i] + lon[i] phase0 = 2 * scipy.pi", "light curve...\") time = numpy.array(t - min(t)) area_tot = scipy.zeros_like(time)", "0] = 0.0 mu0[mu0 < 0] = 0.0 # Flux", "* p period0 = scipy.ones(nspot_tot) * p # all spots", "scipy.rand(nspot_tot) * 2 * scipy.pi # distribution of spot latitudes", "scipy.zeros_like(time) # add up the contributions of individual spots for", "''' This is a simplified version of the class-based routines", "difference between equatorial and polar rotation period 
(unit of time", "= scipy.zeros_like(time) # add up the contributions of individual spots", "= 2 * scipy.pi * time / period0[i] + lon[i]", "= amax[i] * \\ scipy.exp(-(time - pk[i])**2 / 2. /", "latitudes uniform in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) # spot rotation", "on star at any one time amp = desired light", "mu0[mu0 < 0] = 0.0 # Flux dF_tot -= area", "res0 = scipy.array([nspot_eff.mean(), ff, amp_eff]) res1 = scipy.zeros((4, len(time))) res1[0,:]", "individual spots for i in range(nspot_tot): # Spot area if", "= desired light curve amplitude tau = characteristic spot life-time", "- 0.5) * diffrot + 1.0 ) * p period0", "print(\"Computing light curve...\") time = numpy.array(t - min(t)) area_tot =", "# time-series nspot_tot = int(nspot * dur / 2 /", "during entire # time-series nspot_tot = int(nspot * dur /", "of) filling factor needed per spot ff = amp /", "scipy.pi * time / period[i] + lon[i] phase0 = 2", "numpy.array(t - min(t)) area_tot = scipy.zeros_like(time) dF_tot = scipy.zeros_like(time) dF_tot0", "of individual spots was %.4f.' % ff) # print('Desired amplitude", "\\ scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase) mu0 = scipy.cos(incl) *", "well before and end well after time-series limits (to #", "+ lon[i] mu = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl)", "factor of individual spots was %.4f.' % ff) # print('Desired", "dF_tot -= area * mu dF_tot0 -= area * mu0", "class-based routines in spot_model.py. It generates a light curves for", "of) total number of spots needed during entire # time-series", "number of spots present on star at any one time", "per spot ff = amp / scipy.sqrt(nspot) scale_fac = 1", "have the evolution timescale decay = scipy.ones(nspot_tot) * tau #", "p period0 = scipy.ones(nspot_tot) * p # all spots have", "spot ff = amp / scipy.sqrt(nspot) scale_fac = 1 amax", "diffrot = 0. 
''' This is a simplified version of", "up the contributions of individual spots for i in range(nspot_tot):", "# Fore-shortening phase = 2 * scipy.pi * time /", "scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase) mu0 = scipy.cos(incl) * scipy.sin(lat[i])", "scipy.cos(phase0) mu[mu < 0] = 0.0 mu0[mu0 < 0] =", "optionally depends on latitude period = ((scipy.sin(lat) - 0.5) *", "mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \\ scipy.sin(incl) * scipy.cos(lat[i])", "< 0] = 0.0 # Flux dF_tot -= area *", "well after time-series limits (to # avoid edge effects) extra", "= scipy.ones_like(time) * amax[i] else: area = amax[i] * \\", "of spot latitudes uniform in sin(latitude) lat = scipy.arcsin(scipy.rand(nspot_tot)) #", "# (crude estimate of) filling factor needed per spot ff", "on latitude period = ((scipy.sin(lat) - 0.5) * diffrot +", "= dF_tot0 # print('Used %d spots in total over %d" ]
[ "-h, --help show this help message and exit quirks: sorts", "think into the mess at \"sort\" vs \"LC_ALL=C sort\" import", "usage: sort.py [-h] sort lines options: -h, --help show this", "doc -k$N,$N and -n and maybe little else is worth", "a line as different than none ending a line examples:", "\"\"\" usage: sort.py [-h] sort lines options: -h, --help show", "exit quirks: sorts tabs as different than spaces sorts some", "ending a line as different than none ending a line", "message and exit quirks: sorts tabs as different than spaces", "sorts some spaces ending a line as different than none", "vs \"LC_ALL=C sort\" import sys import argdoc def main(): args", "else is worth learning # FIXME: ass -k-1,-1 for negative", "as different than none ending a line examples: Oh no!", "not implemented\\n\") sys.exit(2) # exit 2 from rejecting usage if", "\"LC_ALL=C sort\" import sys import argdoc def main(): args =", "mess at \"sort\" vs \"LC_ALL=C sort\" import sys import argdoc", "worth learning # FIXME: ass -k-1,-1 for negative field indexing", "options: -h, --help show this help message and exit quirks:", "sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2) # exit 2 from", "💥 💔 💥 \"\"\" # FIXME: doc -k$N,$N and -n", "sys import argdoc def main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip()))", "args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2)", "is worth learning # FIXME: ass -k-1,-1 for negative field", "FIXME: think into the mess at \"sort\" vs \"LC_ALL=C sort\"", "this help message and exit quirks: sorts tabs as different", "little else is worth learning # FIXME: ass -k-1,-1 for", "and exit quirks: sorts tabs as different than spaces sorts", 
"maybe little else is worth learning # FIXME: ass -k-1,-1", "the mess at \"sort\" vs \"LC_ALL=C sort\" import sys import", "--help show this help message and exit quirks: sorts tabs", "sorts tabs as different than spaces sorts some spaces ending", "FIXME: doc -k$N,$N and -n and maybe little else is", "examples: Oh no! No examples disclosed!! 💥 💔 💥 \"\"\"", "sort lines options: -h, --help show this help message and", "field indexing # FIXME: think into the mess at \"sort\"", "negative field indexing # FIXME: think into the mess at", "# FIXME: think into the mess at \"sort\" vs \"LC_ALL=C", "import argdoc def main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py:", "# exit 2 from rejecting usage if __name__ == \"__main__\":", "-k-1,-1 for negative field indexing # FIXME: think into the", "rejecting usage if __name__ == \"__main__\": main() # copied from:", "\"\"\" # FIXME: doc -k$N,$N and -n and maybe little", "python3 \"\"\" usage: sort.py [-h] sort lines options: -h, --help", "__name__ == \"__main__\": main() # copied from: git clone https://github.com/pelavarre/pybashish.git", "different than none ending a line examples: Oh no! No", "2 from rejecting usage if __name__ == \"__main__\": main() #", "usage if __name__ == \"__main__\": main() # copied from: git", "💔 💥 \"\"\" # FIXME: doc -k$N,$N and -n and", "-k$N,$N and -n and maybe little else is worth learning", "show this help message and exit quirks: sorts tabs as", "line as different than none ending a line examples: Oh", "than none ending a line examples: Oh no! 
No examples", "#!/usr/bin/env python3 \"\"\" usage: sort.py [-h] sort lines options: -h,", "than spaces sorts some spaces ending a line as different", "sys.exit(2) # exit 2 from rejecting usage if __name__ ==", "# FIXME: doc -k$N,$N and -n and maybe little else", "some spaces ending a line as different than none ending", "at \"sort\" vs \"LC_ALL=C sort\" import sys import argdoc def", "sort\" import sys import argdoc def main(): args = argdoc.parse_args()", "spaces ending a line as different than none ending a", "# FIXME: ass -k-1,-1 for negative field indexing # FIXME:", "main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\")", "examples disclosed!! 💥 💔 💥 \"\"\" # FIXME: doc -k$N,$N", "line examples: Oh no! No examples disclosed!! 💥 💔 💥", "for negative field indexing # FIXME: think into the mess", "error: not implemented\\n\") sys.exit(2) # exit 2 from rejecting usage", "Oh no! No examples disclosed!! 
💥 💔 💥 \"\"\" #", "argdoc def main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error:", "sort.py [-h] sort lines options: -h, --help show this help", "into the mess at \"sort\" vs \"LC_ALL=C sort\" import sys", "import sys import argdoc def main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args))", "exit 2 from rejecting usage if __name__ == \"__main__\": main()", "= argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2) #", "from rejecting usage if __name__ == \"__main__\": main() # copied", "sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2) # exit 2", "and -n and maybe little else is worth learning #", "if __name__ == \"__main__\": main() # copied from: git clone", "\"sort\" vs \"LC_ALL=C sort\" import sys import argdoc def main():", "quirks: sorts tabs as different than spaces sorts some spaces", "implemented\\n\") sys.exit(2) # exit 2 from rejecting usage if __name__", "def main(): args = argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not", "No examples disclosed!! 
💥 💔 💥 \"\"\" # FIXME: doc", "argdoc.parse_args() sys.stderr.write(\"{}\\n\".format(args)) sys.stderr.write(\"{}\\n\".format(argdoc.format_usage().rstrip())) sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2) # exit", "💥 \"\"\" # FIXME: doc -k$N,$N and -n and maybe", "as different than spaces sorts some spaces ending a line", "and maybe little else is worth learning # FIXME: ass", "different than spaces sorts some spaces ending a line as", "<filename>bin/sort.py #!/usr/bin/env python3 \"\"\" usage: sort.py [-h] sort lines options:", "a line examples: Oh no! No examples disclosed!! 💥 💔", "tabs as different than spaces sorts some spaces ending a", "[-h] sort lines options: -h, --help show this help message", "indexing # FIXME: think into the mess at \"sort\" vs", "spaces sorts some spaces ending a line as different than", "help message and exit quirks: sorts tabs as different than", "-n and maybe little else is worth learning # FIXME:", "none ending a line examples: Oh no! No examples disclosed!!", "FIXME: ass -k-1,-1 for negative field indexing # FIXME: think", "ending a line examples: Oh no! No examples disclosed!! 💥", "sys.stderr.write(\"sort.py: error: not implemented\\n\") sys.exit(2) # exit 2 from rejecting", "disclosed!! 💥 💔 💥 \"\"\" # FIXME: doc -k$N,$N and", "lines options: -h, --help show this help message and exit", "no! No examples disclosed!! 💥 💔 💥 \"\"\" # FIXME:", "ass -k-1,-1 for negative field indexing # FIXME: think into", "learning # FIXME: ass -k-1,-1 for negative field indexing #" ]
[ "retString = response['status'] if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s", "for opt, arg in opts: if opt in ('--authenticate'): getAccessToken()", "= doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue}) if ('error'", "[ --on device ] [ --off device ] [ --bell", "method = 'down' if ('error' in response): name = ''", "authenticate() return try: opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\",", "%s - %s\" % ( method, deviceId, name, retString)); elif", "(\"-d\", \"--dim\"): if (dimlevel < 0): logger.debug(\"Dimlevel must be set", "consumer, token) headers = oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn =", "getopt.GetoptError: printUsage() sys.exit(2) dimlevel = -1 for opt, arg in", "config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request", "in config or config['token'] == ''): authenticate() return try: opts,", "= response['status'] if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device", "import datetime from configobj import ConfigObj import logging global logger", "[ --help ]\") print(\" [ --on device ] [ --off", "import davan.util.application_logger as log_manager #insert your own public_key and private_key", "global config config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY,", "def getAccessToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token =", "logger.debug(\"Turning %s device %s, %s - %s\" % ( method,", "== TELLSTICK_DOWN): method = 'down' if ('error' in response): name", "'bell' elif (methodId == TELLSTICK_UP): method = 'up' elif (methodId", "following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After logging", 
"sensor id number\") print(\"\") print(\"Report bugs to <<EMAIL>>\") def listSensors():", "TELLSTICK_TURNOFF): method = 'off' elif (methodId == TELLSTICK_BELL): method =", "config['requestTokenSecret'] = None config['token'] = str(token.key) config['tokenSecret'] = str(token.secret) logger.debug(", "configuration = config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON", "elif opt in (\"-l\", \"--list\"): listDevices() elif opt in (\"-s\",", "PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF", "% len(response['sensor'])); for sensor in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug(", "in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt in (\"-d\", \"--dim\"):", "(device['state'] == TELLSTICK_DIM): state = \"DIMMED\" elif (device['state'] == TELLSTICK_UP):", "token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following url in your", "to use this application run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken'] =", "(-v short option)\") print(\" Set dim level. 'level' should an", "print(\" Dims device. 
'device' must be an integer of the", "sensors: %i\" % len(response['sensor'])); for sensor in response['sensor']: lastupdate =", "{'id': deviceId, 'method': methodId, 'value': methodValue}) if ('error' in response):", "elif (methodId == TELLSTICK_DOWN): method = 'down' if ('error' in", "TELLSTICK_BELL = 4 TELLSTICK_DIM = 16 TELLSTICK_UP = 128 TELLSTICK_DOWN", "TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN; def", "= 'OFF' elif (device['state'] == TELLSTICK_DIM): state = \"DIMMED\" elif", "TELLSTICK_DIM): state = \"DIMMED\" elif (device['state'] == TELLSTICK_UP): state =", "option\") print(\"\") print(\" --off device (-f short option)\") print(\" Turns", "else: state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'], state));", "!= 200: logger.debug( 'Error retreiving access token, the server replied:\\n%s'", "config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication successful, you can now use", "name is outputed with the --list option\") print(\" Note: The", "os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass config.write() def main(argv): global config", "listSensorsAndValues() elif opt in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt in", "listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response def getSensorData(sensorId):", "= datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for data in response['data']: logger.debug(", "TELLSTICK_TURNOFF): state = 'OFF' elif (device['state'] == TELLSTICK_DIM): state =", "elif opt in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt in (\"-n\",", "headers['Content-Type'] = 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method", "token.key) logger.debug( 'After logging in and accepting to use this", "'After logging in and accepting to 
use this application run:\\n%s", "not in config or config['token'] == ''): authenticate() return try:", "\"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"])", "device (-n short option)\") print(\" Turns on device. 'device' must", "\"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt in (\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF)", "device ] [ --bell device ]\") print(\" [ --dimlevel level", "sensors\") print(\"\") print(\" --sensor-data sensor (-d short option)\") print(\" Get", "== TELLSTICK_TURNON): state = 'ON' elif (device['state'] == TELLSTICK_TURNOFF): state", "== TELLSTICK_UP): method = 'up' elif (methodId == TELLSTICK_DOWN): method", "for opt, arg in opts: if opt in (\"-h\", \"--help\"):", "print(\"\") print(\" --dimlevel level (-v short option)\") print(\" Set dim", "getopt, httplib, urllib, json, os import oauth.oauth as oauth import", "logging in and accepting to use this application run:\\n%s --authenticate'", "http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header())", "= oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer,", "getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt, arg in opts: if opt", "%i\" % len(response['sensor'])); for sensor in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated']));", "name is outputed with the --list option\") print(\"\") print(\" --dim", "name, retString)); elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s", "must be set with --dimlevel before --dim\") else: doMethod(arg, 
TELLSTICK_DIM,", "= httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method + \"?\" + urllib.urlencode(params,", "(\"-s\", \"--list-sensors\"): listSensors() elif opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif", "def listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response def", "True).replace('+', '%20'), headers=headers) response = conn.getresponse() try: return json.load(response) except:", "opts: if opt in (\"-h\", \"--help\"): printUsage() elif opt in", "token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method,", "logging global logger logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager", "bugs to <<EMAIL>>\") def listSensors(): response = doRequest('sensors/list', {'includeIgnored': 1});", "data['value'], lastupdate) ) def listDevices(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})", "json.load(response) except: logger.debug( 'Failed to decode response :%s'%str(response)) return \"\"", "{'id': sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for", "in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'], lastupdate) )", "methodValue = 0): response = doRequest('device/info', {'id': deviceId}) if (methodId", "and name is outputed with the --list option\") print(\"\") print(\"", "config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)", "return response def 
getSensorData(sensorId): response = doRequest('sensor/info', {'id': sensorId });", "% token.key) logger.debug( 'After logging in and accepting to use", "except getopt.GetoptError: pass requestToken() def saveConfig(): global config try: os.makedirs(os.environ['HOME']", "print(\" --off device (-f short option)\") print(\" Turns off device.", "(sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues': 1});", "(TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s %s - %s\"", "[ --list ] [ --help ]\") print(\" [ --on device", "elif (device['state'] == TELLSTICK_DIM): state = \"DIMMED\" elif (device['state'] ==", "# -*- coding: utf-8 -*- import sys, getopt, httplib, urllib,", "an integer of the device-id\") print(\" Device-id and name is", "= TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP", "in (\"-h\", \"--help\"): printUsage() elif opt in (\"-l\", \"--list\"): listDevices()", "name is outputed with the --list option\") print(\"\") print(\" --list-sensors", "def listSensors(): response = doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of sensors:", "% len(response['device'])); for device in response['device']: if (device['state'] == TELLSTICK_TURNON):", "token, the server replied:\\n%s' % resp.read()) return token = oauth.OAuthToken.from_string(resp.read())", "doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\" % len(response['device'])); for", "print(\" List currently configured devices.\") print(\"\") print(\" --help (-h short", "config['token'] = str(token.key) config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication successful, you", "to devices supporting this. 
'device' must\") print(\" be an integer", "% (sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues':", "== TELLSTICK_TURNON): method = 'on' elif (methodId == TELLSTICK_TURNOFF): method", "be set before using this option.\") print(\"\") print(\" --dimlevel level", "= 16 TELLSTICK_UP = 128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS =", "= doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response def getSensorData(sensorId): response =", "(device['state'] == TELLSTICK_DOWN): state = \"DOWN\" else: state = 'Unknown", "\"--list-sensorsvalue\"): listSensorsAndValues() elif opt in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt", "''): authenticate() return try: opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\",", "TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL = 4 TELLSTICK_DIM", "deviceId, 'method': methodId, 'value': methodValue}) if ('error' in response): retString", "up command to devices supporting this. 
'device' must\") print(\" be", "configured devices.\") print(\"\") print(\" --help (-h short option)\") print(\" Shows", "ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'],", "= str(token.secret) logger.debug( 'Authentication successful, you can now use tdtool')", "--up device --down device ]\") print(\"\") print(\" --list (-l short", "= response['name']; for data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name,", "(-h short option)\") print(\" Shows this screen.\") print(\"\") print(\" --on", "%s - %s\" % (deviceId, name, methodValue, retString)) def doRequest(method,", "TELLSTICK_TURNOFF = 2 TELLSTICK_BELL = 4 TELLSTICK_DIM = 16 TELLSTICK_UP", "None config['token'] = str(token.key) config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication successful,", "--dim device (-d short option)\") print(\" Dims device. 'device' must", "TELLSTICK_DOWN): method = 'down' if ('error' in response): name =", "try: return json.load(response) except: logger.debug( 'Failed to decode response :%s'%str(response))", "print(\" --sensor-data sensor (-d short option)\") print(\" Get sensor data", "getAccessToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'],", "= doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\" % len(response['device']));", "elif opt in (\"-s\", \"--list-sensors\"): listSensors() elif opt in (\"-x\",", "in opts: if opt in (\"-h\", \"--help\"): printUsage() elif opt", "to: %s %s - %s\" % (method, deviceId, name, retString))", "resp = conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following", "Dims device. 
'device' must be an integer of the device-id\")", "state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'], state)); def doMethod(deviceId, methodId, methodValue", "str(token.key) config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication successful, you can now", "(\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt in (\"-d\", \"--dim\"): if", "= httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp = conn.getresponse() if resp.status", "devices supporting this. 'device' must\") print(\" be an integer of", "outputed with the --list option\") print(\"\") print(\" --dim device (-d", "name = '' retString = response['error'] else: name = response['name']", "] [ --help ]\") print(\" [ --on device ] [", "an integer, 0-255.\") print(\" Note: This parameter must be set", "getopt.GetoptError: pass requestToken() def saveConfig(): global config try: os.makedirs(os.environ['HOME'] +", "the --list option\") print(\"\") print(\" --down device\") print(\" Sends down", "doRequest('sensor/info', {'id': sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name'];", "logger.debug(\"Number of devices: %i\" % len(response['device'])); for device in response['device']:", "saveConfig(): global config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass config.write()", "== TELLSTICK_DOWN): state = \"DOWN\" else: state = 'Unknown state'", "\"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'], lastupdate) ) def listDevices(): response", "option\") print(\"\") print(\" --dim device (-d short option)\") print(\" Dims", "print(\" --dim device (-d short option)\") print(\" Dims device. 
'device'", "--sensor-data sensor (-d short option)\") print(\" Get sensor data with", "'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'], state)); def doMethod(deviceId, methodId,", "print(\" Sends bell command to devices supporting this. 'device' must\")", "else: doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt in (\"-v\", \"--dimlevel\"): dimlevel", "state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'], state)); def", "(device['id'], device['name'], state)); def doMethod(deviceId, methodId, methodValue = 0): response", "listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues': 1}); return response def listDevicesAndValues():", "Shows this screen.\") print(\"\") print(\" --on device (-n short option)\")", "state = \"DIMMED\" elif (device['state'] == TELLSTICK_UP): state = \"UP\"", "devices.\") print(\"\") print(\" --help (-h short option)\") print(\" Shows this", "elif (device['state'] == TELLSTICK_UP): state = \"UP\" elif (device['state'] ==", "--on device (-n short option)\") print(\" Turns on device. 'device'", "'up' elif (methodId == TELLSTICK_DOWN): method = 'down' if ('error'", "option)\") print(\" Sends bell command to devices supporting this. 
'device'", "'/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request", "global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret'])", "'method': methodId, 'value': methodValue}) if ('error' in response): retString =", "the --list option\") print(\"\") print(\" --up device\") print(\" Sends up", "(methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s, %s -", "#!/usr/bin/env python # -*- coding: utf-8 -*- import sys, getopt,", "opt, arg in opts: if opt in (\"-h\", \"--help\"): printUsage()", "Sends down command to devices supporting this. 'device' must\") print(\"", "--up device\") print(\" Sends up command to devices supporting this.", "'/.config/Telldus') except: pass config.write() def main(argv): global config if ('token'", "elif (methodId == TELLSTICK_BELL): method = 'bell' elif (methodId ==", "(sensor_name, data['name'], data['value'], lastupdate) ) def listDevices(): response = doRequest('devices/list',", "doMethod(arg, TELLSTICK_TURNOFF) elif opt in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif", "option)\") print(\" Dims device. 
'device' must be an integer of", "davan.config.config_creator as config_creator configuration = config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY", "name is outputed with the --list option\") print(\"\") print(\" --off", "Get sensor data with sensor id number\") print(\"\") print(\"Report bugs", "Lists currently configured sensors\") print(\"\") print(\" --sensor-data sensor (-d short", "name = response['name'] response = doRequest('device/command', {'id': deviceId, 'method': methodId,", "= 'ON' elif (device['state'] == TELLSTICK_TURNOFF): state = 'OFF' elif", "TELLSTICK_DOWN) if __name__ == \"__main__\": config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')", "opt, arg in opts: if opt in ('--authenticate'): getAccessToken() return", "[ --off device ] [ --bell device ]\") print(\" [", "response = doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of sensors: %i\" %", "config['requestToken'] = str(token.key) config['requestTokenSecret'] = str(token.secret) saveConfig() def getAccessToken(): global", "outputed with the --list option\") print(\"\") print(\" --list-sensors (-s short", "be set before using dim.\") print(\"\") print(\" --bell device (-b", "TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s %s - %s\" %", "conn.getresponse() try: return json.load(response) except: logger.debug( 'Failed to decode response", "in ('--authenticate'): getAccessToken() return except getopt.GetoptError: pass requestToken() def saveConfig():", "method + \"?\" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response =", "sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [ --list ] [ --help", "global config if ('token' not in config or config['token'] ==", "oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params) 
oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)", "replied:\\n%s' % resp.read()) return token = oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None", "TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s %s - %s\" % (method,", "with the --list option\") print(\"\") print(\" --down device\") print(\" Sends", "print(\" --down device\") print(\" Sends down command to devices supporting", "= ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token =", "args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\",", "Turns on device. 'device' must be an integer of the", "= getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\",", "== TELLSTICK_DIM): state = \"DIMMED\" elif (device['state'] == TELLSTICK_UP): state", "% (sensor_name, data['name'], data['value'], lastupdate) ) def listDevices(): response =", "(methodId == TELLSTICK_UP): method = 'up' elif (methodId == TELLSTICK_DOWN):", "== TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s to %s - %s\"", "(\"-h\", \"--help\"): printUsage() elif opt in (\"-l\", \"--list\"): listDevices() elif", "= datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues():", "TELLSTICK_TURNON): state = 'ON' elif (device['state'] == TELLSTICK_TURNOFF): state =", "doMethod(arg, TELLSTICK_BELL) elif opt in (\"-d\", \"--dim\"): if (dimlevel <", "oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" +", "\"dim=\", \"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"]) except getopt.GetoptError: printUsage() sys.exit(2)", "% resp.read()) return token = 
oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret']", "doMethod(arg, TELLSTICK_DOWN) if __name__ == \"__main__\": config = ConfigObj(os.environ['HOME'] +", "parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers = oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded'", "print(\" Get sensor data with sensor id number\") print(\"\") print(\"Report", "2 TELLSTICK_BELL = 4 TELLSTICK_DIM = 16 TELLSTICK_UP = 128", "down command to devices supporting this. 'device' must\") print(\" be", "TELLSTICK_TURNON): method = 'on' elif (methodId == TELLSTICK_TURNOFF): method =", "oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' %", "Set dim level. 'level' should an integer, 0-255.\") print(\" Note:", "should an integer, 0-255.\") print(\" Note: This parameter must be", "<reponame>davandev/davanserver<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- import sys,", "opt in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt in (\"-f\",", "level. 'level' should an integer, 0-255.\") print(\" Note: This parameter", "print(\"Usage: %s [ options ]\" % sys.argv[0]) print(\"\") print(\"Options:\") print(\"", "authenticate(): try: opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt,", "256 SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM", "None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read()", "opt in ('--authenticate'): getAccessToken() return except getopt.GetoptError: pass requestToken() def", "print(\" Sends up command to devices supporting this. 
'device' must\")", "print(\" --list-sensors (-s short option)\") print(\" Lists currently configured sensors\")", "= oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n'", "-1 for opt, arg in opts: if opt in (\"-h\",", "config['requestToken'] = None config['requestTokenSecret'] = None config['token'] = str(token.key) config['tokenSecret']", "\"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"]) except", "retString = response['error'] else: name = response['name'] response = doRequest('device/command',", "'/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open", "if opt in ('--authenticate'): getAccessToken() return except getopt.GetoptError: pass requestToken()", "request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp", "config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') configuration = config_creator.create() log_manager.start_logging(configuration[\"LOGFILE_PATH\"],loglevel=4) main(sys.argv[1:])", "your own public_key and private_key import davan.config.config_creator as config_creator configuration", "method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers = oauth_request.to_header() headers['Content-Type'] =", "before --dim\") else: doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt in (\"-v\",", "--down device\") print(\" Sends down command to devices supporting this.", "print(\"\") print(\" --list-sensors (-s short option)\") print(\" Lists currently configured", "be set with --dimlevel before --dim\") else: doMethod(arg, TELLSTICK_DIM, dimlevel)", "methodValue, retString)) def 
doRequest(method, params): global config config = ConfigObj(os.environ['HOME']", "('--authenticate'): getAccessToken() return except getopt.GetoptError: pass requestToken() def saveConfig(): global", "lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate)) def", "device-id\") print(\" Device-id and name is outputed with the --list", "print(\"\") print(\" --on device (-n short option)\") print(\" Turns on", "short option)\") print(\" Turns off device. 'device' must be an", "in response): retString = response['error'] else: retString = response['status'] if", "device %s, %s - %s\" % ( method, deviceId, name,", "elif opt in (\"-v\", \"--dimlevel\"): dimlevel = arg elif opt", "elif opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__ == \"__main__\":", "successful, you can now use tdtool') saveConfig() def authenticate(): try:", "Note: This parameter must be set before using dim.\") print(\"\")", "doRequest(method, params): global config config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer", "getAccessToken() return except getopt.GetoptError: pass requestToken() def saveConfig(): global config", "'on' elif (methodId == TELLSTICK_TURNOFF): method = 'off' elif (methodId", "print(\"\") print(\" --down device\") print(\" Sends down command to devices", "state = 'ON' elif (device['state'] == TELLSTICK_TURNOFF): state = 'OFF'", "= doRequest('sensor/info', {'id': sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name =", "'device' must be an integer of the device-id\") print(\" Device-id", "of sensors: %i\" % len(response['sensor'])); for sensor in response['sensor']: lastupdate", "]\" % sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [ --list ]", "- %s\" % (deviceId, name, methodValue, retString)) def doRequest(method, params):", "number\") print(\"\") print(\"Report bugs to 
<<EMAIL>>\") def listSensors(): response =", "= response['error'] else: name = response['name'] response = doRequest('device/command', {'id':", "\"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt in (\"-d\", \"--dim\"): if (dimlevel", "def saveConfig(): global config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass", "short option)\") print(\" Shows this screen.\") print(\"\") print(\" --on device", "is outputed with the --list option\") print(\"\") print(\" --up device\")", "opt in (\"-d\", \"--dim\"): if (dimlevel < 0): logger.debug(\"Dimlevel must", "response['error'] else: name = response['name'] response = doRequest('device/command', {'id': deviceId,", "('error' in response): retString = response['error'] else: retString = response['status']", "try: opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt, arg", "--list (-l short option)\") print(\" List currently configured devices.\") print(\"\")", "config or config['token'] == ''): authenticate() return try: opts, args", "in (\"-d\", \"--dim\"): if (dimlevel < 0): logger.debug(\"Dimlevel must be", "TELLSTICK_UP) elif opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__ ==", "in (\"-l\", \"--list\"): listDevices() elif opt in (\"-s\", \"--list-sensors\"): listSensors()", "print(\"\") print(\" --dim device (-d short option)\") print(\" Dims device.", "http_url=\"http://api.telldus.com/json/\" + method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers = oauth_request.to_header()", "config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass config.write() def main(argv):", "with the --list option\") print(\"\") print(\" --list-sensors (-s short option)\")", "| TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN; def printUsage():", "str(token.secret) logger.debug( 'Authentication successful, you can now use tdtool') saveConfig()", "--dimlevel before --dim\") else: 
doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt in", "dimlevel = -1 for opt, arg in opts: if opt", "method, deviceId, name, retString)); elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)):", "--on device ] [ --off device ] [ --bell device", "--dim device ]\") print(\" [ --up device --down device ]\")", "with sensor id number\") print(\"\") print(\"Report bugs to <<EMAIL>>\") def", "retString)) elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s to", "option\") print(\"\") print(\" --list-sensors (-s short option)\") print(\" Lists currently", "= oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn =", "as log_manager #insert your own public_key and private_key import davan.config.config_creator", "logger.debug( 'Failed to decode response :%s'%str(response)) return \"\" def requestToken():", "('token' not in config or config['token'] == ''): authenticate() return", "{'includeValues': 1}); return response def listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods':", "for data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'],", "urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response = conn.getresponse() try: return json.load(response)", "sensor['name'], lastupdate)) def listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues': 1}); return", "consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request =", "def requestToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request =", "print(\" Note: This parameter must be set before using dim.\")", "(methodId == TELLSTICK_DOWN): method = 'down' if ('error' in response):", "\"dimlevel=\", \"up=\", 
\"down=\", \"help\"]) except getopt.GetoptError: printUsage() sys.exit(2) dimlevel =", "elif opt in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt in (\"--down\"):", "device. 'device' must be an integer of the device-id\") print(\"", "name is outputed with the --list option\") print(\"\") print(\" --down", "| TELLSTICK_UP | TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s [ options", "0-255.\") print(\" Note: This parameter must be set before using", "= configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF =", "name is outputed with the --list option\") print(\"\") print(\" --up", "oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret'] = None config['token'] = str(token.key)", "response['status'] if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s,", "]\") print(\" [ --on device ] [ --off device ]", "print(\"\") print(\"Report bugs to <<EMAIL>>\") def listSensors(): response = doRequest('sensors/list',", "--list option\") print(\"\") print(\" --off device (-f short option)\") print(\"", "opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\",", "\"off=\", \"dim=\", \"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"]) except getopt.GetoptError: printUsage()", "TELLSTICK_BELL) elif opt in (\"-d\", \"--dim\"): if (dimlevel < 0):", "= response['error'] else: retString = response['status'] if (methodId in (TELLSTICK_TURNON,", "conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read() token", "retString)); elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to:", "(device['state'] == TELLSTICK_TURNON): state = 'ON' elif (device['state'] == TELLSTICK_TURNOFF):", "if ('error' in response): 
name = '' retString = response['error']", "--down device ]\") print(\"\") print(\" --list (-l short option)\") print(\"", "{'supportedMethods': SUPPORTED_METHODS}) return response def getSensorData(sensorId): response = doRequest('sensor/info', {'id':", "http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers =", "= None config['requestTokenSecret'] = None config['token'] = str(token.key) config['tokenSecret'] =", "parameter must be set before using dim.\") print(\"\") print(\" --bell", "(device['state'] == TELLSTICK_TURNOFF): state = 'OFF' elif (device['state'] == TELLSTICK_DIM):", "PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET',", "getSensorData(arg) elif opt in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt", "= 1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL = 4 TELLSTICK_DIM =", "application run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret'] =", "elif (methodId == TELLSTICK_UP): method = 'up' elif (methodId ==", "parameter must be set before using this option.\") print(\"\") print(\"", "response = conn.getresponse() try: return json.load(response) except: logger.debug( 'Failed to", "else: name = response['name'] response = doRequest('device/command', {'id': deviceId, 'method':", "coding: utf-8 -*- import sys, getopt, httplib, urllib, json, os", "print(\" --dimlevel level (-v short option)\") print(\" Set dim level.", "= str(token.key) config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication successful, you can", "%s [ options ]\" % sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh]", "(-s short option)\") print(\" Lists currently configured sensors\") print(\"\") print(\"", "None config['requestTokenSecret'] = None 
config['token'] = str(token.key) config['tokenSecret'] = str(token.secret)", "elif opt in (\"-d\", \"--dim\"): if (dimlevel < 0): logger.debug(\"Dimlevel", "logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager #insert your own", "= 'bell' elif (methodId == TELLSTICK_UP): method = 'up' elif", "is outputed with the --list option\") print(\"\") print(\" --list-sensors (-s", "print(\" be an integer of the device-id\") print(\" Device-id and", "retString = response['error'] else: retString = response['status'] if (methodId in", "device: %s %s to %s - %s\" % (deviceId, name,", "must\") print(\" be an integer of the device-id\") print(\" Device-id", "def getSensorData(sensorId): response = doRequest('sensor/info', {'id': sensorId }); lastupdate =", "with the --list option\") print(\"\") print(\" --off device (-f short", "in opts: if opt in ('--authenticate'): getAccessToken() return except getopt.GetoptError:", "set before using this option.\") print(\"\") print(\" --dimlevel level (-v", "configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF = 2", "(methodId == TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s to %s -", "'' retString = response['error'] else: name = response['name'] response =", "decode response :%s'%str(response)) return \"\" def requestToken(): global config consumer", "return token = oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret'] = None", "= 'up' elif (methodId == TELLSTICK_DOWN): method = 'down' if", "token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\"", "oauth.oauth as oauth import datetime from configobj import ConfigObj import", "methodId, methodValue = 0): response = doRequest('device/info', {'id': deviceId}) 
if", "conn.getresponse() if resp.status != 200: logger.debug( 'Error retreiving access token,", "'device' must\") print(\" be an integer of the device-id\") print(\"", "% (device['id'], device['name'], state)); def doMethod(deviceId, methodId, methodValue = 0):", "= oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method,", "set with --dimlevel before --dim\") else: doMethod(arg, TELLSTICK_DIM, dimlevel) elif", "\"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif opt in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL)", "this screen.\") print(\"\") print(\" --on device (-n short option)\") print(\"", "device ]\") print(\" [ --dimlevel level --dim device ]\") print(\"", "doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response def getSensorData(sensorId): response = doRequest('sensor/info',", "(\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif opt in (\"-b\", \"--bell\"): doMethod(arg,", "logger logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager #insert your", "if (device['state'] == TELLSTICK_TURNON): state = 'ON' elif (device['state'] ==", "% ( method, deviceId, name, retString)); elif (methodId in (TELLSTICK_BELL,", "= 'down' if ('error' in response): name = '' retString", "methodId, 'value': methodValue}) if ('error' in response): retString = response['error']", "response['device']: if (device['state'] == TELLSTICK_TURNON): state = 'ON' elif (device['state']", "-[lnfdbvh] [ --list ] [ --help ]\") print(\" [ --on", "off device. 
'device' must be an integer of the device-id\")", "conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method + \"?\" +", "[ options ]\" % sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [", "this. 'device' must\") print(\" be an integer of the device-id\")", "return json.load(response) except: logger.debug( 'Failed to decode response :%s'%str(response)) return", "the --list option\") print(\" Note: The dimlevel parameter must be", "with the --list option\") print(\"\") print(\" --dim device (-d short", "doMethod(arg, TELLSTICK_TURNON) elif opt in (\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif", "\"help\"]) except getopt.GetoptError: printUsage() sys.exit(2) dimlevel = -1 for opt,", "short option)\") print(\" Dims device. 'device' must be an integer", "listSensors(): response = doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of sensors: %i\"", ":%s'%str(response)) return \"\" def requestToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY,", "oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer,", "doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of sensors: %i\" % len(response['sensor'])); for", "in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s, %s - %s\"", "currently configured devices.\") print(\"\") print(\" --help (-h short option)\") print(\"", "logger.debug( 'Error retreiving access token, the server replied:\\n%s' % resp.read())", "short option)\") print(\" Get sensor data with sensor id number\")", "TELLSTICK_DOWN = 256 SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL", "% (deviceId, name, methodValue, retString)) def doRequest(method, params): 
global config", "if ('error' in response): retString = response['error'] else: retString =", "TELLSTICK_UP): method = 'up' elif (methodId == TELLSTICK_DOWN): method =", "print(\" Turns off device. 'device' must be an integer of", "to %s - %s\" % (deviceId, name, methodValue, retString)) def", "except: logger.debug( 'Failed to decode response :%s'%str(response)) return \"\" def", "oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),", "'%20'), headers=headers) response = conn.getresponse() try: return json.load(response) except: logger.debug(", "def listDevices(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices:", "{'includeIgnored': 1}); logger.debug(\"Number of sensors: %i\" % len(response['sensor'])); for sensor", "option)\") print(\" Shows this screen.\") print(\"\") print(\" --on device (-n", "%s\" % ( method, deviceId, name, retString)); elif (methodId in", "in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__ == \"__main__\": config =", "oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers = oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn", "def listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues': 1}); return response def", "print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [ --list ] [ --help ]\")", "print(\" Note: The dimlevel parameter must be set before using", "print(\" --bell device (-b short option)\") print(\" Sends bell command", "% (method, deviceId, name, retString)) elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming", "use this application run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken'] = str(token.key)", "config_creator configuration = config_creator.create() 
PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"]", "sensor (-d short option)\") print(\" Get sensor data with sensor", "the --list option\") print(\"\") print(\" --off device (-f short option)\")", "logger.debug( 'After logging in and accepting to use this application", "len(response['device'])); for device in response['device']: if (device['state'] == TELLSTICK_TURNON): state", "logger.debug( 'Open the following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key)", "TELLSTICK_TURNON) elif opt in (\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif opt", "Note: The dimlevel parameter must be set before using this", "device ]\") print(\" [ --up device --down device ]\") print(\"\")", "print(\" [ --up device --down device ]\") print(\"\") print(\" --list", "request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn", "<<EMAIL>>\") def listSensors(): response = doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of", "logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues(): response =", "doRequest('device/info', {'id': deviceId}) if (methodId == TELLSTICK_TURNON): method = 'on'", "--list option\") print(\"\") print(\" --list-sensors (-s short option)\") print(\" Lists", "name, retString)) elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s", "token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp = conn.getresponse()", "%s\" % (deviceId, name, methodValue, retString)) def doRequest(method, params): global", "config['token'] == ''): authenticate() return try: opts, args = getopt.getopt(argv,", "args = 
getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt, arg in opts:", "method = 'off' elif (methodId == TELLSTICK_BELL): method = 'bell'", "url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After logging in", "print(\" --on device (-n short option)\") print(\" Turns on device.", "= 128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF", "--bell device ]\") print(\" [ --dimlevel level --dim device ]\")", "(-b short option)\") print(\" Sends bell command to devices supporting", "is outputed with the --list option\") print(\"\") print(\" --down device\")", "import sys, getopt, httplib, urllib, json, os import oauth.oauth as", "response def listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response", "level (-v short option)\") print(\" Set dim level. 'level' should", "response = doRequest('sensors/list', {'includeValues': 1}); return response def listDevicesAndValues(): response", "listDevices(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\"", "-*- import sys, getopt, httplib, urllib, json, os import oauth.oauth", "utf-8 -*- import sys, getopt, httplib, urllib, json, os import", "= \"UP\" elif (device['state'] == TELLSTICK_DOWN): state = \"DOWN\" else:", "the device-id\") print(\" Device-id and name is outputed with the", "opt in (\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif opt in (\"-b\",", "(dimlevel < 0): logger.debug(\"Dimlevel must be set with --dimlevel before", "request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp", "printUsage() elif opt in (\"-l\", \"--list\"): listDevices() elif opt in", "currently configured sensors\") print(\"\") print(\" --sensor-data sensor (-d 
short option)\")", "TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP |", "return \"\" def requestToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)", "option\") print(\" Note: The dimlevel parameter must be set before", "TELLSTICK_UP | TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s [ options ]\"", "consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request =", "options ]\" % sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [ --list", "response['error'] else: retString = response['status'] if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)):", "your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After logging in and accepting", "16 TELLSTICK_UP = 128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS = TELLSTICK_TURNON", "method = 'bell' elif (methodId == TELLSTICK_UP): method = 'up'", "print(\"\") print(\" --sensor-data sensor (-d short option)\") print(\" Get sensor", "%s device %s, %s - %s\" % ( method, deviceId,", "if (dimlevel < 0): logger.debug(\"Dimlevel must be set with --dimlevel", "Turns off device. 'device' must be an integer of the", "| TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s", "oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken',", "print(\" Sends down command to devices supporting this. 
'device' must\")", "as oauth import datetime from configobj import ConfigObj import logging", "elif opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt in (\"-d\",", "\"?\" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response = conn.getresponse() try:", "TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s [", "'Failed to decode response :%s'%str(response)) return \"\" def requestToken(): global", "4 TELLSTICK_DIM = 16 TELLSTICK_UP = 128 TELLSTICK_DOWN = 256", "--help ]\") print(\" [ --on device ] [ --off device", "if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s, %s", "'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method + \"?\"", "access token, the server replied:\\n%s' % resp.read()) return token =", "128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF |", "set before using dim.\") print(\"\") print(\" --bell device (-b short", "= oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret'] = None config['token'] =", "getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\",", "= getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt, arg in opts: if", "device\") print(\" Sends up command to devices supporting this. 
'device'", "data['name'], data['value'], lastupdate) ) def listDevices(): response = doRequest('devices/list', {'supportedMethods':", "+ '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret'])", "this application run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret']", "PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET',", "= 2 TELLSTICK_BELL = 4 TELLSTICK_DIM = 16 TELLSTICK_UP =", "method = 'on' elif (methodId == TELLSTICK_TURNOFF): method = 'off'", "- %s\" % ( method, deviceId, name, retString)); elif (methodId", "printUsage() sys.exit(2) dimlevel = -1 for opt, arg in opts:", "short option)\") print(\" Turns on device. 'device' must be an", "and name is outputed with the --list option\") print(\" Note:", "List currently configured devices.\") print(\"\") print(\" --help (-h short option)\")", "response = doRequest('sensor/info', {'id': sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name", "retString)) def doRequest(method, params): global config config = ConfigObj(os.environ['HOME'] +", "import logging global logger logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as", "of devices: %i\" % len(response['device'])); for device in response['device']: if", "+ urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response = conn.getresponse() try: return", "saveConfig() def authenticate(): try: opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate'])", "you can now use tdtool') saveConfig() def authenticate(): try: opts,", "arg in opts: if opt in ('--authenticate'): getAccessToken() return except", "--dimlevel level --dim device ]\") print(\" [ --up device --down", "before 
using this option.\") print(\"\") print(\" --dimlevel level (-v short", "= oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,", "= -1 for opt, arg in opts: if opt in", "requestToken() def saveConfig(): global config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus') except:", "doMethod(deviceId, methodId, methodValue = 0): response = doRequest('device/info', {'id': deviceId})", "token = oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret'] = None config['token']", "(-f short option)\") print(\" Turns off device. 'device' must be", "resp.status != 200: logger.debug( 'Error retreiving access token, the server", "= 'off' elif (methodId == TELLSTICK_BELL): method = 'bell' elif", "TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s [ options ]\" % sys.argv[0])", "'', ['authenticate']) for opt, arg in opts: if opt in", "with --dimlevel before --dim\") else: doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt", "data with sensor id number\") print(\"\") print(\"Report bugs to <<EMAIL>>\")", "opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate']) for opt, arg in", "= str(token.key) config['requestTokenSecret'] = str(token.secret) saveConfig() def getAccessToken(): global config", "arg elif opt in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt in", "logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager #insert your own public_key and", "print(\"\") print(\" --help (-h short option)\") print(\" Shows this screen.\")", "print(\"\") print(\" --bell device (-b short option)\") print(\" Sends bell", "\"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"]) except getopt.GetoptError:", "consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, 
http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer,", "(methodId == TELLSTICK_BELL): method = 'bell' elif (methodId == TELLSTICK_UP):", "config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),", "= oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)", "(TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s, %s - %s\" %", "sensor data with sensor id number\") print(\"\") print(\"Report bugs to", "sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for data", "urllib, json, os import oauth.oauth as oauth import datetime from", "return response def listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return", "(device['state'] == TELLSTICK_UP): state = \"UP\" elif (device['state'] == TELLSTICK_DOWN):", "opt in (\"-s\", \"--list-sensors\"): listSensors() elif opt in (\"-x\", \"--list-sensorsvalue\"):", "__name__ == \"__main__\": config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') configuration =", "def printUsage(): print(\"Usage: %s [ options ]\" % sys.argv[0]) print(\"\")", "The dimlevel parameter must be set before using this option.\")", "% sys.argv[0]) print(\"\") print(\"Options:\") print(\" -[lnfdbvh] [ --list ] [", "davan.util.application_logger as log_manager #insert your own public_key and private_key import", "opt in (\"-v\", \"--dimlevel\"): dimlevel = arg elif opt in", "in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt in (\"-n\", \"--on\"): 
doMethod(arg,", "Sends bell command to devices supporting this. 'device' must\") print(\"", "dimlevel) elif opt in (\"-v\", \"--dimlevel\"): dimlevel = arg elif", "print(\"Options:\") print(\" -[lnfdbvh] [ --list ] [ --help ]\") print(\"", "opts: if opt in ('--authenticate'): getAccessToken() return except getopt.GetoptError: pass", "public_key and private_key import davan.config.config_creator as config_creator configuration = config_creator.create()", "deviceId, name, retString)) elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming device: %s", "in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN)", "response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'], lastupdate) ) def", "using this option.\") print(\"\") print(\" --dimlevel level (-v short option)\")", "{'id': deviceId}) if (methodId == TELLSTICK_TURNON): method = 'on' elif", "( method, deviceId, name, retString)); elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP,", "saveConfig() def getAccessToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token", "pass config.write() def main(argv): global config if ('token' not in", "oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn", "'Error retreiving access token, the server replied:\\n%s' % resp.read()) return", "conn.request('GET', \"/json/\" + method + \"?\" + urllib.urlencode(params, True).replace('+', '%20'),", "must be an integer of the device-id\") print(\" Device-id and", "if opt in (\"-h\", \"--help\"): printUsage() elif opt in (\"-l\",", "sys.exit(2) dimlevel = -1 for opt, arg in opts: if", "else: retString = response['status'] if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)): logger.debug(\"Turning", "print(\" Turns on device. 
'device' must be an integer of", "dim.\") print(\"\") print(\" --bell device (-b short option)\") print(\" Sends", "%s, %s - %s\" % ( method, deviceId, name, retString));", "config['requestTokenSecret'] = str(token.secret) saveConfig() def getAccessToken(): global config consumer =", "accepting to use this application run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken']", "dim level. 'level' should an integer, 0-255.\") print(\" Note: This", "the following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After", "data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'], lastupdate)", "opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__ == \"__main__\": config", "TELLSTICK_UP = 128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS = TELLSTICK_TURNON |", "'OFF' elif (device['state'] == TELLSTICK_DIM): state = \"DIMMED\" elif (device['state']", "elif opt in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt in", "\"--dimlevel\"): dimlevel = arg elif opt in (\"--up\"): doMethod(arg, TELLSTICK_UP)", "| TELLSTICK_DOWN; def printUsage(): print(\"Usage: %s [ options ]\" %", "outputed with the --list option\") print(\"\") print(\" --off device (-f", "= 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method +", "oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80')", "-*- coding: utf-8 -*- import sys, getopt, httplib, urllib, json,", "| TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN;", "level --dim device ]\") print(\" [ --up device --down device", "if ('token' not in config or config['token'] == ''): authenticate()", "try: 
os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass config.write() def main(argv): global", "] [ --off device ] [ --bell device ]\") print(\"", "configobj import ConfigObj import logging global logger logger = logging.getLogger(os.path.basename(__file__))", "]\") print(\" [ --dimlevel level --dim device ]\") print(\" [", "id number\") print(\"\") print(\"Report bugs to <<EMAIL>>\") def listSensors(): response", "device in response['device']: if (device['state'] == TELLSTICK_TURNON): state = 'ON'", "response = doRequest('device/info', {'id': deviceId}) if (methodId == TELLSTICK_TURNON): method", "= 4 TELLSTICK_DIM = 16 TELLSTICK_UP = 128 TELLSTICK_DOWN =", "(method, deviceId, name, retString)) elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming device:", "python # -*- coding: utf-8 -*- import sys, getopt, httplib,", "the --list option\") print(\"\") print(\" --list-sensors (-s short option)\") print(\"", "opt in (\"-h\", \"--help\"): printUsage() elif opt in (\"-l\", \"--list\"):", "== TELLSTICK_TURNOFF): method = 'off' elif (methodId == TELLSTICK_BELL): method", "command to devices supporting this. 
'device' must\") print(\" be an", "(deviceId, name, methodValue, retString)) def doRequest(method, params): global config config", "(-l short option)\") print(\" List currently configured devices.\") print(\"\") print(\"", "state = 'OFF' elif (device['state'] == TELLSTICK_DIM): state = \"DIMMED\"", "use tdtool') saveConfig() def authenticate(): try: opts, args = getopt.getopt(sys.argv[1:],", "tdtool') saveConfig() def authenticate(): try: opts, args = getopt.getopt(sys.argv[1:], '',", "def authenticate(): try: opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate']) for", "\"DOWN\" else: state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'],", "TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN; def printUsage(): print(\"Usage:", "be an integer of the device-id\") print(\" Device-id and name", "\"up=\", \"down=\", \"help\"]) except getopt.GetoptError: printUsage() sys.exit(2) dimlevel = -1", "opt in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt in (\"-n\", \"--on\"):", "if resp.status != 200: logger.debug( 'Error retreiving access token, the", "== TELLSTICK_TURNOFF): state = 'OFF' elif (device['state'] == TELLSTICK_DIM): state", "(\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt in (\"-d\", \"--sensor-data\"): getSensorData(arg) elif", "device\") print(\" Sends down command to devices supporting this. 
'device'", "os import oauth.oauth as oauth import datetime from configobj import", "\"--dim\"): if (dimlevel < 0): logger.debug(\"Dimlevel must be set with", "print(\" --help (-h short option)\") print(\" Shows this screen.\") print(\"\")", "print(\" --up device\") print(\" Sends up command to devices supporting", "in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt in (\"-d\", \"--sensor-data\"): getSensorData(arg)", "printUsage(): print(\"Usage: %s [ options ]\" % sys.argv[0]) print(\"\") print(\"Options:\")", "option)\") print(\" Lists currently configured sensors\") print(\"\") print(\" --sensor-data sensor", "option)\") print(\" Get sensor data with sensor id number\") print(\"\")", "= 0): response = doRequest('device/info', {'id': deviceId}) if (methodId ==", "response): retString = response['error'] else: retString = response['status'] if (methodId", "print(\" Device-id and name is outputed with the --list option\")", "TELLSTICK_UP): state = \"UP\" elif (device['state'] == TELLSTICK_DOWN): state =", "--off device (-f short option)\") print(\" Turns off device. 
'device'", "--list-sensors (-s short option)\") print(\" Lists currently configured sensors\") print(\"\")", "response): name = '' retString = response['error'] else: name =", "\"/json/\" + method + \"?\" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers)", "--list option\") print(\"\") print(\" --down device\") print(\" Sends down command", "option\") print(\"\") print(\" --down device\") print(\" Sends down command to", "== TELLSTICK_BELL): method = 'bell' elif (methodId == TELLSTICK_UP): method", "elif (methodId == TELLSTICK_TURNOFF): method = 'off' elif (methodId ==", "state = \"UP\" elif (device['state'] == TELLSTICK_DOWN): state = \"DOWN\"", "response['name'] response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue})", "= config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON =", "(-d short option)\") print(\" Get sensor data with sensor id", "must be set before using dim.\") print(\"\") print(\" --bell device", "\"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues(): response = doRequest('sensors/list',", "in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'],", "params): global config config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer =", "token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers", "conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug(", "--authenticate' % (sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret'] = str(token.secret) 
saveConfig()", "import davan.config.config_creator as config_creator configuration = config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"]", "TELLSTICK_TURNOFF)): logger.debug(\"Turning %s device %s, %s - %s\" % (", "print(\"\") print(\" --off device (-f short option)\") print(\" Turns off", "'value': methodValue}) if ('error' in response): retString = response['error'] else:", "and accepting to use this application run:\\n%s --authenticate' % (sys.argv[0]))", "= str(token.secret) saveConfig() def getAccessToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY,", "retreiving access token, the server replied:\\n%s' % resp.read()) return token", "print(\" Lists currently configured sensors\") print(\"\") print(\" --sensor-data sensor (-d", "logger.debug(\"Sending %s to: %s %s - %s\" % (method, deviceId,", "device ] [ --off device ] [ --bell device ]\")", "global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken')", "print(\" --list (-l short option)\") print(\" List currently configured devices.\")", "oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token,", "resp = conn.getresponse() if resp.status != 200: logger.debug( 'Error retreiving", "option.\") print(\"\") print(\" --dimlevel level (-v short option)\") print(\" Set", "\"--sensor-data\"): getSensorData(arg) elif opt in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif", "ConfigObj import logging global logger logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger", "sensor in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'],", ") def listDevices(): response = doRequest('devices/list', 
{'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of", "= conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following url", "TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s to %s - %s\" %", "1}); return response def listDevicesAndValues(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})", "return try: opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\",", "dimlevel = arg elif opt in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif", "= arg elif opt in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt", "server replied:\\n%s' % resp.read()) return token = oauth.OAuthToken.from_string(resp.read()) config['requestToken'] =", "%s %s to %s - %s\" % (deviceId, name, methodValue,", "#insert your own public_key and private_key import davan.config.config_creator as config_creator", "for sensor in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" %", "bell command to devices supporting this. 'device' must\") print(\" be", "print(\" [ --on device ] [ --off device ] [", "from configobj import ConfigObj import logging global logger logger =", "in and accepting to use this application run:\\n%s --authenticate' %", "outputed with the --list option\") print(\" Note: The dimlevel parameter", "deviceId, name, retString)); elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending", "(-n short option)\") print(\" Turns on device. 
'device' must be", "Device-id and name is outputed with the --list option\") print(\"\")", "the --list option\") print(\"\") print(\" --dim device (-d short option)\")", "print(\" [ --dimlevel level --dim device ]\") print(\" [ --up", "if (methodId == TELLSTICK_TURNON): method = 'on' elif (methodId ==", "%i\" % len(response['device'])); for device in response['device']: if (device['state'] ==", "'off' elif (methodId == TELLSTICK_BELL): method = 'bell' elif (methodId", "except: pass config.write() def main(argv): global config if ('token' not", "lastupdate)) def listSensorsAndValues(): response = doRequest('sensors/list', {'includeValues': 1}); return response", "conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp = conn.getresponse() if", "str(token.key) config['requestTokenSecret'] = str(token.secret) saveConfig() def getAccessToken(): global config consumer", "response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate))", "consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp =", "global logger logger = logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager #insert", "token) headers = oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\")", "in response['device']: if (device['state'] == TELLSTICK_TURNON): state = 'ON' elif", "str(token.secret) saveConfig() def getAccessToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)", "= 256 SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL |", "of the device-id\") print(\" Device-id and name is outputed with", "now use tdtool') saveConfig() def 
authenticate(): try: opts, args =", "supporting this. 'device' must\") print(\" be an integer of the", "Sends up command to devices supporting this. 'device' must\") print(\"", "httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read() token = oauth.OAuthToken.from_string(resp)", "= \"DIMMED\" elif (device['state'] == TELLSTICK_UP): state = \"UP\" elif", "option)\") print(\" Turns off device. 'device' must be an integer", "PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL", "elif opt in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt in", "= doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number of sensors: %i\" % len(response['sensor']));", "1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL = 4 TELLSTICK_DIM = 16", "== TELLSTICK_UP): state = \"UP\" elif (device['state'] == TELLSTICK_DOWN): state", "as config_creator configuration = config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY =", "option)\") print(\" Set dim level. 'level' should an integer, 0-255.\")", "--dimlevel level (-v short option)\") print(\" Set dim level. 
'level'", "outputed with the --list option\") print(\"\") print(\" --down device\") print(\"", "the server replied:\\n%s' % resp.read()) return token = oauth.OAuthToken.from_string(resp.read()) config['requestToken']", "for device in response['device']: if (device['state'] == TELLSTICK_TURNON): state =", "state)); def doMethod(deviceId, methodId, methodValue = 0): response = doRequest('device/info',", "--list option\") print(\"\") print(\" --dim device (-d short option)\") print(\"", "logger.debug(\"Dimlevel must be set with --dimlevel before --dim\") else: doMethod(arg,", "response['name']; for data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'],", "must be set before using this option.\") print(\"\") print(\" --dimlevel", "%s\" % (method, deviceId, name, retString)) elif (methodId == TELLSTICK_DIM):", "short option)\") print(\" Sends bell command to devices supporting this.", "elif (device['state'] == TELLSTICK_DOWN): state = \"DOWN\" else: state =", "\"bell=\", \"dimlevel=\", \"up=\", \"down=\", \"help\"]) except getopt.GetoptError: printUsage() sys.exit(2) dimlevel", "http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(),", "deviceId}) if (methodId == TELLSTICK_TURNON): method = 'on' elif (methodId", "= conn.getresponse() if resp.status != 200: logger.debug( 'Error retreiving access", "= doRequest('device/info', {'id': deviceId}) if (methodId == TELLSTICK_TURNON): method =", "response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) return response def getSensorData(sensorId): response", "= response['name'] response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value':", "listDevices() elif opt in (\"-s\", \"--list-sensors\"): listSensors() elif opt in", "+ method + \"?\" + 
urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response", "['authenticate']) for opt, arg in opts: if opt in ('--authenticate'):", "(-d short option)\") print(\" Dims device. 'device' must be an", "import ConfigObj import logging global logger logger = logging.getLogger(os.path.basename(__file__)) import", "}); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for data in", "config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method, parameters=params)", "dimlevel parameter must be set before using this option.\") print(\"\")", "oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" + method,", "\"--list-sensors\"): listSensors() elif opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt", "- %s\" % (method, deviceId, name, retString)) elif (methodId ==", "opt in (\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt in (\"--down\"): doMethod(arg,", "\"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\", \"up=\",", "or config['token'] == ''): authenticate() return try: opts, args =", "TELLSTICK_BELL): method = 'bell' elif (methodId == TELLSTICK_UP): method =", "\"--list\"): listDevices() elif opt in (\"-s\", \"--list-sensors\"): listSensors() elif opt", "is outputed with the --list option\") print(\"\") print(\" --dim device", "doMethod(arg, TELLSTICK_UP) elif opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__", "opt in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt in (\"-d\",", "[ --dimlevel level --dim device ]\") print(\" [ --up device", "'ON' elif (device['state'] == TELLSTICK_TURNOFF): state = 'OFF' elif 
(device['state']", "config.write() def main(argv): global config if ('token' not in config", "private_key import davan.config.config_creator as config_creator configuration = config_creator.create() PUBLIC_KEY =", "with the --list option\") print(\"\") print(\" --up device\") print(\" Sends", "]\") print(\" [ --up device --down device ]\") print(\"\") print(\"", "%s to: %s %s - %s\" % (method, deviceId, name,", "resp.read()) return token = oauth.OAuthToken.from_string(resp.read()) config['requestToken'] = None config['requestTokenSecret'] =", "in (\"-v\", \"--dimlevel\"): dimlevel = arg elif opt in (\"--up\"):", "\"down=\", \"help\"]) except getopt.GetoptError: printUsage() sys.exit(2) dimlevel = -1 for", "'level' should an integer, 0-255.\") print(\" Note: This parameter must", "return except getopt.GetoptError: pass requestToken() def saveConfig(): global config try:", "short option)\") print(\" Lists currently configured sensors\") print(\"\") print(\" --sensor-data", "= httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp = conn.getresponse().read() token =", "global config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus') except: pass config.write() def", "in (\"-f\", \"--off\"): doMethod(arg, TELLSTICK_TURNOFF) elif opt in (\"-b\", \"--bell\"):", "datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\" % (sensor['id'], sensor['name'], lastupdate)) def listSensorsAndValues(): response", "is outputed with the --list option\") print(\"\") print(\" --off device", "name, methodValue, retString)) def doRequest(method, params): global config config =", "SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM |", "(sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret'] = str(token.secret) saveConfig() def getAccessToken():", "elif opt in (\"-f\", \"--off\"): doMethod(arg, 
TELLSTICK_TURNOFF) elif opt in", "response def getSensorData(sensorId): response = doRequest('sensor/info', {'id': sensorId }); lastupdate", "configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL = 4", "lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for data in response['data']:", "sys, getopt, httplib, urllib, json, os import oauth.oauth as oauth", "can now use tdtool') saveConfig() def authenticate(): try: opts, args", "response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue}) if", "logger.debug(\"%s\\t%s\\t%s\" % (device['id'], device['name'], state)); def doMethod(deviceId, methodId, methodValue =", "if __name__ == \"__main__\": config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') configuration", "] [ --bell device ]\") print(\" [ --dimlevel level --dim", "configured sensors\") print(\"\") print(\" --sensor-data sensor (-d short option)\") print(\"", "logger.debug(\"Number of sensors: %i\" % len(response['sensor'])); for sensor in response['sensor']:", "to <<EMAIL>>\") def listSensors(): response = doRequest('sensors/list', {'includeIgnored': 1}); logger.debug(\"Number", "config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token", "state = \"DOWN\" else: state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" %", "print(\" Set dim level. 'level' should an integer, 0-255.\") print(\"", "%s %s - %s\" % (method, deviceId, name, retString)) elif", "\"UP\" elif (device['state'] == TELLSTICK_DOWN): state = \"DOWN\" else: state", "on device. 
'device' must be an integer of the device-id\")", "= oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(),", "'Authentication successful, you can now use tdtool') saveConfig() def authenticate():", "TELLSTICK_TURNOFF) elif opt in (\"-b\", \"--bell\"): doMethod(arg, TELLSTICK_BELL) elif opt", "= configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1 TELLSTICK_TURNOFF = 2 TELLSTICK_BELL =", "print(\"\") print(\" --up device\") print(\" Sends up command to devices", "conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open the following url in", "def doMethod(deviceId, methodId, methodValue = 0): response = doRequest('device/info', {'id':", "--list option\") print(\" Note: The dimlevel parameter must be set", "pass requestToken() def saveConfig(): global config try: os.makedirs(os.environ['HOME'] + '/.config/Telldus')", "= doRequest('sensors/list', {'includeValues': 1}); return response def listDevicesAndValues(): response =", "config_creator.create() PUBLIC_KEY = configuration[\"TELLDUS_PUBLIC_KEY\"] PRIVATE_KEY = configuration[\"TELLDUS_PRIVATE_KEY\"] TELLSTICK_TURNON = 1", "device ]\") print(\"\") print(\" --list (-l short option)\") print(\" List", "+ method, parameters=params) oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token) headers = oauth_request.to_header() headers['Content-Type']", "= oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer,", "this option.\") print(\"\") print(\" --dimlevel level (-v short option)\") print(\"", "option)\") print(\" List currently configured devices.\") print(\"\") print(\" --help (-h", "device (-b short 
option)\") print(\" Sends bell command to devices", "(\"-d\", \"--sensor-data\"): getSensorData(arg) elif opt in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON)", "token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken')", "option)\") print(\" Turns on device. 'device' must be an integer", "log_manager #insert your own public_key and private_key import davan.config.config_creator as", "doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt in (\"-v\", \"--dimlevel\"): dimlevel =", "200: logger.debug( 'Error retreiving access token, the server replied:\\n%s' %", "import oauth.oauth as oauth import datetime from configobj import ConfigObj", "print(\" Shows this screen.\") print(\"\") print(\" --on device (-n short", "SUPPORTED_METHODS}) return response def getSensorData(sensorId): response = doRequest('sensor/info', {'id': sensorId", "lastupdate) ) def listDevices(): response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number", "--off device ] [ --bell device ]\") print(\" [ --dimlevel", "%s to %s - %s\" % (deviceId, name, methodValue, retString))", "headers=request.to_header()) resp = conn.getresponse() if resp.status != 200: logger.debug( 'Error", "1}); logger.debug(\"Number of sensors: %i\" % len(response['sensor'])); for sensor in", "in response): name = '' retString = response['error'] else: name", "logger.debug( 'Authentication successful, you can now use tdtool') saveConfig() def", "except getopt.GetoptError: printUsage() sys.exit(2) dimlevel = -1 for opt, arg", "in (\"-s\", \"--list-sensors\"): listSensors() elif opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues()", "using dim.\") print(\"\") print(\" --bell device (-b short option)\") print(\"", "= \"DOWN\" else: state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'],", "device 
--down device ]\") print(\"\") print(\" --list (-l short option)\")", "{'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\" % len(response['device'])); for device", "to decode response :%s'%str(response)) return \"\" def requestToken(): global config", "= oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\"", "request.to_url(), headers=request.to_header()) resp = conn.getresponse() if resp.status != 200: logger.debug(", "in (\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt in (\"-f\", \"--off\"):", "(\"-v\", \"--dimlevel\"): dimlevel = arg elif opt in (\"--up\"): doMethod(arg,", "0): logger.debug(\"Dimlevel must be set with --dimlevel before --dim\") else:", "SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\" % len(response['device'])); for device in", "def doRequest(method, params): global config config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')", "arg in opts: if opt in (\"-h\", \"--help\"): printUsage() elif", "%s - %s\" % (method, deviceId, name, retString)) elif (methodId", "own public_key and private_key import davan.config.config_creator as config_creator configuration =", "+ \"?\" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers) response = conn.getresponse()", "datetime.datetime.fromtimestamp(int(response['lastUpdated'])); sensor_name = response['name']; for data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\"", "elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s", "short option)\") print(\" Set dim level. 
'level' should an integer,", "'Open the following url in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug(", "\"__main__\": config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') configuration = config_creator.create() log_manager.start_logging(configuration[\"LOGFILE_PATH\"],loglevel=4)", "(\"-n\", \"--on\"): doMethod(arg, TELLSTICK_TURNON) elif opt in (\"-f\", \"--off\"): doMethod(arg,", "--list ] [ --help ]\") print(\" [ --on device ]", "< 0): logger.debug(\"Dimlevel must be set with --dimlevel before --dim\")", "getSensorData(sensorId): response = doRequest('sensor/info', {'id': sensorId }); lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated']));", "httplib, urllib, json, os import oauth.oauth as oauth import datetime", "doRequest('sensors/list', {'includeValues': 1}); return response def listDevicesAndValues(): response = doRequest('devices/list',", "config config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)", "\"\" def requestToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request", "headers=request.to_header()) resp = conn.getresponse().read() token = oauth.OAuthToken.from_string(resp) logger.debug( 'Open the", "listSensors() elif opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt in", "response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS}) logger.debug(\"Number of devices: %i\" %", "Device-id and name is outputed with the --list option\") print(\"", "= oauth.OAuthToken(config['token'], config['tokenSecret']) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url=\"http://api.telldus.com/json/\" +", "webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After logging in and accepting to", 
"oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret']) request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token,", "device (-f short option)\") print(\" Turns off device. 'device' must", "method = 'up' elif (methodId == TELLSTICK_DOWN): method = 'down'", "(methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s %s", "= logging.getLogger(os.path.basename(__file__)) import davan.util.application_logger as log_manager #insert your own public_key", "'down' if ('error' in response): name = '' retString =", "with the --list option\") print(\" Note: The dimlevel parameter must", "= None config['token'] = str(token.key) config['tokenSecret'] = str(token.secret) logger.debug( 'Authentication", "short option)\") print(\" List currently configured devices.\") print(\"\") print(\" --help", "(\"--down\"): doMethod(arg, TELLSTICK_DOWN) if __name__ == \"__main__\": config = ConfigObj(os.environ['HOME']", "]\") print(\"\") print(\" --list (-l short option)\") print(\" List currently", "opt in (\"-x\", \"--list-sensorsvalue\"): listSensorsAndValues() elif opt in (\"-d\", \"--sensor-data\"):", "consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header()) resp =", "integer, 0-255.\") print(\" Note: This parameter must be set before", "TELLSTICK_DIM, dimlevel) elif opt in (\"-v\", \"--dimlevel\"): dimlevel = arg", "logger.debug( \"%s\\t%s\\t%s\\t%s\" % (sensor_name, data['name'], data['value'], lastupdate) ) def listDevices():", "before using dim.\") print(\"\") print(\" --bell device (-b short option)\")", "[ --up device --down device ]\") print(\"\") print(\" --list (-l", "conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp = conn.getresponse() if resp.status != 200:", "= 'Unknown state' logger.debug(\"%s\\t%s\\t%s\" % (device['id'], 
device['name'], state)); def doMethod(deviceId,", "('error' in response): name = '' retString = response['error'] else:", "[ --bell device ]\") print(\" [ --dimlevel level --dim device", "main(argv): global config if ('token' not in config or config['token']", "== ''): authenticate() return try: opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\",", "print(\"\") print(\" --list (-l short option)\") print(\" List currently configured", "device (-d short option)\") print(\" Dims device. 'device' must be", "= conn.getresponse() try: return json.load(response) except: logger.debug( 'Failed to decode", "print(\" -[lnfdbvh] [ --list ] [ --help ]\") print(\" [", "oauth import datetime from configobj import ConfigObj import logging global", "PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn =", "integer of the device-id\") print(\" Device-id and name is outputed", "device['name'], state)); def doMethod(deviceId, methodId, methodValue = 0): response =", "sensor_name = response['name']; for data in response['data']: logger.debug( \"%s\\t%s\\t%s\\t%s\" %", "= 'on' elif (methodId == TELLSTICK_TURNOFF): method = 'off' elif", "datetime from configobj import ConfigObj import logging global logger logger", "\"DIMMED\" elif (device['state'] == TELLSTICK_UP): state = \"UP\" elif (device['state']", "(\"--up\"): doMethod(arg, TELLSTICK_UP) elif opt in (\"--down\"): doMethod(arg, TELLSTICK_DOWN) if", "def main(argv): global config if ('token' not in config or", "TELLSTICK_DIM = 16 TELLSTICK_UP = 128 TELLSTICK_DOWN = 256 SUPPORTED_METHODS", "devices: %i\" % len(response['device'])); for device in response['device']: if (device['state']", "--list option\") print(\"\") print(\" --up device\") print(\" Sends up command", "[\"list\", \"list-sensors\", \"sensor-data=\", \"on=\", \"off=\", \"dim=\", \"bell=\", \"dimlevel=\", 
\"up=\", \"down=\",", "opt in (\"-l\", \"--list\"): listDevices() elif opt in (\"-s\", \"--list-sensors\"):", "= '' retString = response['error'] else: name = response['name'] response", "and private_key import davan.config.config_creator as config_creator configuration = config_creator.create() PUBLIC_KEY", "--bell device (-b short option)\") print(\" Sends bell command to", "config if ('token' not in config or config['token'] == ''):", "doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue}) if ('error' in", "\"--help\"): printUsage() elif opt in (\"-l\", \"--list\"): listDevices() elif opt", "outputed with the --list option\") print(\"\") print(\" --up device\") print(\"", "httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET', \"/json/\" + method + \"?\" + urllib.urlencode(params, True).replace('+',", "requestToken(): global config consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY) request = oauth.OAuthRequest.from_consumer_and_token(consumer,", "headers = oauth_request.to_header() headers['Content-Type'] = 'application/x-www-form-urlencoded' conn = httplib.HTTPConnection(\"api.telldus.com:80\") conn.request('GET',", "--dim\") else: doMethod(arg, TELLSTICK_DIM, dimlevel) elif opt in (\"-v\", \"--dimlevel\"):", "print(\"Report bugs to <<EMAIL>>\") def listSensors(): response = doRequest('sensors/list', {'includeIgnored':", "This parameter must be set before using dim.\") print(\"\") print(\"", "+ '/.config/Telldus') except: pass config.write() def main(argv): global config if", "option\") print(\"\") print(\" --up device\") print(\" Sends up command to", "elif (methodId == TELLSTICK_DIM): logger.debug(\"Dimming device: %s %s to %s", "elif (device['state'] == TELLSTICK_TURNOFF): state = 'OFF' elif (device['state'] ==", "headers=headers) response = conn.getresponse() try: return json.load(response) except: logger.debug( 'Failed", "json, os import oauth.oauth as oauth import datetime from configobj", 
"len(response['sensor'])); for sensor in response['sensor']: lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated'])); logger.debug( \"%s\\t%s\\t%s\"", "response :%s'%str(response)) return \"\" def requestToken(): global config consumer =", "in your webbrowser:\\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\\n' % token.key) logger.debug( 'After logging in and", "in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)): logger.debug(\"Sending %s to: %s %s -", "--help (-h short option)\") print(\" Shows this screen.\") print(\"\") print(\"", "methodValue}) if ('error' in response): retString = response['error'] else: retString", "run:\\n%s --authenticate' % (sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret'] = str(token.secret)", "TELLSTICK_DOWN): state = \"DOWN\" else: state = 'Unknown state' logger.debug(\"%s\\t%s\\t%s\"", "% (sys.argv[0])) config['requestToken'] = str(token.key) config['requestTokenSecret'] = str(token.secret) saveConfig() def", "httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, request.to_url(), headers=request.to_header()) resp = conn.getresponse() if resp.status !=", "(\"-l\", \"--list\"): listDevices() elif opt in (\"-s\", \"--list-sensors\"): listSensors() elif", "== \"__main__\": config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf') configuration = config_creator.create()", "(methodId == TELLSTICK_TURNOFF): method = 'off' elif (methodId == TELLSTICK_BELL):", "screen.\") print(\"\") print(\" --on device (-n short option)\") print(\" Turns", "logger.debug(\"Dimming device: %s %s to %s - %s\" % (deviceId,", "try: opts, args = getopt.getopt(argv, \"lsd:n:f:d:b:v:h\", [\"list\", \"list-sensors\", \"sensor-data=\", \"on=\",", "(methodId == TELLSTICK_TURNON): method = 'on' elif (methodId == TELLSTICK_TURNOFF):", "request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken') 
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80')", "0): response = doRequest('device/info', {'id': deviceId}) if (methodId == TELLSTICK_TURNON):", "http_url='http://api.telldus.com/oauth/requestToken') request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None) conn = httplib.HTTPConnection('api.telldus.com:80') conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header())", "is outputed with the --list option\") print(\" Note: The dimlevel" ]
[ "self._parse_dict(item, report, self.position_id, self.position_map) blues = self._parse_list(item, report, self.blue_id, self.blue_map)", "models. \"\"\" # *_id maps a source section id to", "redis_client) if queue.ready(): export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object):", "data collected in the web application. It is the single", "for processing. for queue_key in config.partitions(redis_client): queue = config.queue(queue_key, redis_client)", "entrypoint from which all other data pipelines get their data.", "2 + 1)) if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if success", "False time.sleep(self._retry_wait * (i ** 2 + 1)) if success:", "def _parse_dict(self, item, report, key_map, field_map): value = {} item_source", "if not count: continue type_, action = name.split(\"_\") if type_", "[\"key:\" + self.config.name] @staticmethod def export(task, name, queue_key): with task.db_session(commit=False)", "source, target = spec else: source = spec target =", "= ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in", "% type_] + api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action), count, tags=tags)", "if key_map[1] is None: report.update(value) else: report[key_map[1]] = value return", "gps_age = position.get(\"age\", 0) timestamp = item.get(\"timestamp\") if timestamp: #", ") from ichnaea.models.content import encode_datamap_grid from ichnaea import util WHITESPACE", "WifiObservation, WifiReport, WifiShard, ) from ichnaea.models.content import encode_datamap_grid from ichnaea", "= item.get(key_map[0]) if item_source: value = self._map_dict(item_source, field_map) if value:", ") s3 = boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj = bucket.Object(obj_name)", "web application. 
It is the single entrypoint from which all", "def __call__(self, item): report = {} self._parse_dict(item, report, self.position_id, self.position_map)", "\"report\": item[\"report\"]} ) with self.task.db_session(commit=False) as session: export_configs = ExportConfig.all(session)", "away queue prefix again parts = self.queue_key.split(\":\") source = parts[1]", "self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items): raise NotImplementedError() class DummyExporter(ReportExporter):", "\"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ] def _map_dict(self,", "= [item[\"report\"] for item in queue_items] headers = { \"Content-Encoding\":", "item in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]}", "def send(self, queue_items): # ignore metadata reports = [item[\"report\"] for", "botocore.exceptions import markus import redis.exceptions import requests import requests.exceptions from", "= item_obs obs = { \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\":", "self.queue_observations(pipe, observations) if _map_content_enabled and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics)", "False for i in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items)", "item[\"api_key\"] report = item[\"report\"] obs, malformed_obs = self.process_report(report) any_data =", "and extract set of API keys item[\"report\"] = self.transform(item[\"report\"]) if", "suffix = \"observation\" tags = [\"type:%s\" % type_] + api_tag", "- gps_age if gps_age: # Normalize age fields to be", "\"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", } response = requests.post( self.config.url,", "self.blue_id, 
self.blue_map) cells = self._parse_list(item, report, self.cell_id, self.cell_map) wifis =", "): malformed[name] = 0 observations[name] = {} if data.get(name): for", "inside the section from source to target id # if", "by sharded queue shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix", "id # if the names are equal, a simple string", "tuple): source, target = spec else: source = spec target", "value: values.append(value) if values: report[key_map[1]] = values return values def", "source), items in grouped.items(): for config in export_configs: if config.allowed(api_key,", "cells = self._parse_list(item, report, self.cell_id, self.cell_map) wifis = self._parse_list(item, report,", "import urlparse import uuid import boto3 import boto3.exceptions import botocore.exceptions", "value = self._map_dict(value_item, field_map) if value: values.append(value) if values: report[key_map[1]]", "and specific report data into one item_obs = obs_cls.combine(report, item_report)", "import _map_content_enabled from ichnaea.models import ( ApiKey, BlueObservation, BlueReport, BlueShard,", "position = item.get(\"position\") or {} gps_age = position.get(\"age\", 0) timestamp", "import boto3 import boto3.exceptions import botocore.exceptions import markus import redis.exceptions", "observations) if _map_content_enabled and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def", "It is the single entrypoint from which all other data", "api_key = parts[2] obj_name = path.format( source=source, api_key=api_key, year=year, month=month,", "queue_id = queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values in", "self.task.apply_countdown() class ReportExporter(object): _retriable = (IOError,) _retries = 3 _retry_wait", "not None and lon is not None: grids.add(DataMap.scale(lat, lon)) shards", "\"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", 
WifiShard, \"mac\", \"update_wifi_\"),", "else: metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations)", "observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return (obs, malformed) def process_datamap(self, pipe,", "\"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] += len(obs[name])", "name.split(\"_\") if type_ == \"report\": suffix = \"report\" tags =", "ExportConfig.get(session, name) exporter_types = { \"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\":", "== \"report\": suffix = \"report\" tags = api_tag else: suffix", "metrics = {} items = [] for item in queue_items:", "_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform() def send(self,", "= item_obs.unique_key # if we have better data for the", "BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard,", "queued_obs[queue_id].append(obs.to_json()) for queue_id, values in queued_obs.items(): # enqueue values for", "= set() metrics = {} items = [] for item", "0) metrics[api_key][\"report_upload\"] += 1 if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"]", "WifiShard, ) from ichnaea.models.content import encode_datamap_grid from ichnaea import util", "urllib.parse import urlparse import uuid import boto3 import boto3.exceptions import", "BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport,", "v1 schema used in our own database models. 
\"\"\" #", "= name.split(\"_\") if type_ == \"report\": suffix = \"report\" tags", "is None: report.update(value) else: report[key_map[1]] = value return value def", "a two-tuple position_id = (\"position\", None) position_map = [ (\"latitude\",", "blue/cell/wifi specific fields item_report = report_cls.create(**item) if item_report is None:", "queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values in queued_obs.items(): #", "re import time from urllib.parse import urlparse import uuid import", "= 3 _retry_wait = 1.0 def __init__(self, task, config, queue_key):", "None and lon is not None: grids.add(DataMap.scale(lat, lon)) shards =", "metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations) if", "queues, checks those queues and if they contain enough or", "in metrics.items(): api_tag = [] if api_key and api_key in", "if config.allowed(api_key, source): queue_key = config.queue_key(api_key, source) queue = config.queue(queue_key,", "the web application. 
It is the single entrypoint from which", "pipe: for (api_key, source), items in grouped.items(): for config in", "report[key_map[1]] = value return value def _parse_list(self, item, report, key_map,", "api_key] for name, count in key_metrics.items(): if not count: continue", "{ \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", } response =", "defaultdict import json import re import time from urllib.parse import", "= item_source.get(source) if source_value is not None: value[target] = source_value", "if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items): raise", "\"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", } response = requests.post(", "source_value is not None: value[target] = source_value return value def", "response.raise_for_status() class S3Exporter(ReportExporter): _retriable = ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, )", "ready for processing. 
for queue_key in config.partitions(redis_client): queue = config.queue(queue_key,", "queue_key)() def __call__(self): queue_items = self.queue.dequeue() if not queue_items: return", "keys: columns = ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for", "\"signal\"), ] def _map_dict(self, item_source, field_map): value = {} for", ") response.raise_for_status() class S3Exporter(ReportExporter): _retriable = ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError,", "METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success = True except self._retriable: success =", "= queue_key self.queue = config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\" +", "view code and external transfers (backup, forward to partners) to", "Normalize age fields to be relative to GPS time for", "the names are equal, a simple string can be specified", "source_value = item_source.get(source) if source_value is not None: value[target] =", "from ichnaea import util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS =", "# if we have better data for the same key,", "item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} ) with self.task.db_session(commit=False) as", "in (\"blue\", \"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"]", "api_keys: metrics[api_key] = {} for type_ in (\"report\", \"blue\", \"cell\",", "session: # limit database session to get API keys keys", "[], \"wifi\": []} for item in items: api_key = item[\"api_key\"]", "CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard, )", "{}) malformed = {} observations = {} for name, report_cls,", "is the single entrypoint from which all other data pipelines", "NotImplementedError() class DummyExporter(ReportExporter): def 
send(self, queue_items): pass class GeosubmitExporter(ReportExporter): _retriable", "def __call__(self, export_task): redis_client = self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data", "combine general and specific report data into one item_obs =", "_map_content_enabled and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def queue_observations(self, pipe,", "in the web application. It is the single entrypoint from", "not count: continue type_, action = name.split(\"_\") if type_ ==", "metrics[api_key][\"%s_%s\" % (type_, action)] = 0 with self.task.db_session(commit=False) as session:", ") except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] ) raise", "\"data.export.upload\", tags=self.stats_tags + [\"status:%s\" % response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter):", "simple string can be specified instead # of a two-tuple", "compresslevel=7 ) s3 = boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj =", "and if they contain enough or old enough data schedules", "equal, a simple string can be specified instead # of", "= item.get(\"position\") or {} gps_age = position.get(\"age\", 0) timestamp =", "items.append(item) api_keys.add(item[\"api_key\"]) for api_key in api_keys: metrics[api_key] = {} for", "task to be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\" %", "malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1 if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else:", "= {} for type_ in (\"report\", \"blue\", \"cell\", \"wifi\"): for", "try: data = util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3 =", "(\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\",", "self.cell_map) wifis = 
self._parse_list(item, report, self.wifi_id, self.wifi_map) position = item.get(\"position\")", ") with self.task.db_session(commit=False) as session: export_configs = ExportConfig.all(session) with self.task.redis_pipeline()", "key in api_keys if key] if keys: columns = ApiKey.__table__.c", "= self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped = defaultdict(list) for item", "the task to be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\"", "report, self.cell_id, self.cell_map) wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map) position", "(\"wifiAccessPoints\", \"wifi\") wifi_map = [ (\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\",", "= {} item_source = item.get(key_map[0]) if item_source: value = self._map_dict(item_source,", "get their data. It distributes the data into the configured", "report, self.blue_id, self.blue_map) cells = self._parse_list(item, report, self.cell_id, self.cell_map) wifis", "} response = requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ),", "api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action), count, tags=tags) def process_report(self, data):", "= self._parse_list(item, report, self.wifi_id, self.wifi_map) position = item.get(\"position\") or {}", "bucket = s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr(", "to partners) to the internal submit v1 schema used in", "parts[2] obj_name = path.format( source=source, api_key=api_key, year=year, month=month, day=day )", "set() for lat, lon in positions: if lat is not", "the internal submit v1 schema used in our own database", "is not None and existing.better(item_obs): continue observations[name][item_key] = item_obs obs", "\"wifi\"): for action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)]", "the 
blue/cell/wifi specific fields item_report = report_cls.create(**item) if item_report is", "class InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform()", "for datatype, shard_model, shard_key, queue_prefix in ( (\"blue\", BlueShard, \"mac\",", "trigger exception for bad responses # this causes the task", "# enqueue values for each queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values,", "item_report) item_key = item_obs.unique_key # if we have better data", "\"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map =", "from sqlalchemy import select import sqlalchemy.exc from ichnaea.data import _map_content_enabled", "report data into one item_obs = obs_cls.combine(report, item_report) item_key =", "positions: if lat is not None and lon is not", "import time from urllib.parse import urlparse import uuid import boto3", "path.endswith(\"/\"): path += \"/\" year, month, day = util.utcnow().timetuple()[:3] #", "(\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs = defaultdict(list) for obs", "for item in queue_items] headers = { \"Content-Encoding\": \"gzip\", \"Content-Type\":", "success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key])", "is not None and lon is not None: grids.add(DataMap.scale(lat, lon))", "name, count in key_metrics.items(): if not count: continue type_, action", "class GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException) def send(self, queue_items): #", "and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items): raise NotImplementedError() class", "# s3 key names start without a leading slash path", "\"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", } 
response = requests.post( self.config.url, data=util.encode_gzip(", "for api_key in api_keys: metrics[api_key] = {} for type_ in", "and external transfers (backup, forward to partners) to the internal", "path += \"/\" year, month, day = util.utcnow().timetuple()[:3] # strip", "in grouped.items(): for config in export_configs: if config.allowed(api_key, source): queue_key", "limit database session to get API keys keys = [key", "in item.get(key_map[0], ()): value = self._map_dict(value_item, field_map) if value: values.append(value)", "= config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\" + self.config.name] @staticmethod def", "requests import requests.exceptions from sqlalchemy import select import sqlalchemy.exc from", "item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key in", "(suffix, action), count, tags=tags) def process_report(self, data): report = Report.create(**data)", "api_keys_known, metrics): for api_key, key_metrics in metrics.items(): api_tag = []", "return value def _parse_list(self, item, report, key_map, field_map): values =", "(\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"),", "True except self._retriable: success = False time.sleep(self._retry_wait * (i **", "field_map): values = [] for value_item in item.get(key_map[0], ()): value", "source section id to a target section id # *_map", "observations[\"wifi\"].values(), } return (obs, malformed) def process_datamap(self, pipe, positions): grids", "data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0, ) # log", "action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)] = 0", "in field_map: if isinstance(spec, tuple): source, target = spec else:", "as session: export_configs = ExportConfig.all(session) with 
self.task.redis_pipeline() as pipe: for", "if exporter_type is not None: exporter_type(task, config, queue_key)() def __call__(self):", "\"mac\", \"update_wifi_\"), ): queued_obs = defaultdict(list) for obs in observations[datatype]:", "+ [\"status:success\"] ) except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"]", "self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def queue_observations(self, pipe, observations): for datatype,", "report return {} class InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)", "% response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter): _retriable = ( IOError,", "general and specific report data into one item_obs = obs_cls.combine(report,", "redis_client = self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped", "source = spec target = spec source_value = item_source.get(source) if", "any_data = False for name in (\"blue\", \"cell\", \"wifi\"): if", "pipe=pipe) def emit_metrics(self, api_keys_known, metrics): for api_key, key_metrics in metrics.items():", "responses # this causes the task to be re-tried METRICS.incr(", "True metrics[api_key][name + \"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1", "shard_key, queue_prefix in ( (\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard,", "observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] += len(obs[name]) any_data = True metrics[api_key][name", "validate the blue/cell/wifi specific fields item_report = report_cls.create(**item) if item_report", "field_map) if value: values.append(value) if values: report[key_map[1]] = values return", "database session to get API keys keys = [key for", "grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values in 
shards.items(): queue", "each queue. \"\"\" def __init__(self, task): self.task = task def", "pass class GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException) def send(self, queue_items):", "self.task.redis_pipeline() as pipe: for (api_key, source), items in grouped.items(): for", "grids = set() for lat, lon in positions: if lat", ") def send(self, queue_items): # ignore metadata reports = [item[\"report\"]", "(\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)] = 0 with self.task.db_session(commit=False)", "datatype, shard_model, shard_key, queue_prefix in ( (\"blue\", BlueShard, \"mac\", \"update_blue_\"),", "queue_items): api_keys = set() api_keys_known = set() metrics = {}", "to GPS time for type_ in (\"blue\", \"cell\", \"wifi\"): for", "return values def __call__(self, item): report = {} self._parse_dict(item, report,", "tags=self.stats_tags + [\"status:success\"] ) except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags +", "position.get(\"age\", 0) timestamp = item.get(\"timestamp\") if timestamp: # turn timestamp", "= self._parse_list(item, report, self.cell_id, self.cell_map) wifis = self._parse_list(item, report, self.wifi_id,", "def _map_dict(self, item_source, field_map): value = {} for spec in", "*_id maps a source section id to a target section", "and api_key in api_keys_known: api_tag = [\"key:%s\" % api_key] for", "= task self.config = config self.queue_key = queue_key self.queue =", "data or # old enough data to be ready for", "\"speed\", \"source\", ] blue_id = (\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\",", "with self.task.redis_pipeline() as pipe: for (api_key, source), items in grouped.items():", "if _map_content_enabled and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def queue_observations(self,", "item_source.get(source) if source_value is not None: value[target] = source_value return", "schedules an 
async export task to process the data in", "items and extract set of API keys item[\"report\"] = self.transform(item[\"report\"])", "[] for value_item in item.get(key_map[0], ()): value = self._map_dict(value_item, field_map)", "queue_items = self.queue.dequeue() if not queue_items: return success = False", "they now contain enough data or # old enough data", "# old enough data to be ready for processing. for", "string can be specified instead # of a two-tuple position_id", "source to target id # if the names are equal,", "self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped = defaultdict(list) for item in", "gps_age if gps_age: # Normalize age fields to be relative", "if lat is not None and lon is not None:", "\"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\", ] blue_id =", "\"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\",", "a target section id # *_map maps fields inside the", "# ignore metadata reports = [item[\"report\"] for item in queue_items]", "config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\" + self.config.name] @staticmethod def export(task,", "existing = observations[name].get(item_key) if existing is not None and existing.better(item_obs):", "] wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map = [ (\"macAddress\", \"mac\"),", "= 0 with self.task.db_session(commit=False) as session: # limit database session", "this causes the task to be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags", "timeout=60.0, ) # log upload_status and trigger exception for bad", "= [key for key in api_keys if key] if keys:", "keys keys = [key for key in api_keys if key]", "parts = self.queue_key.split(\":\") source = parts[1] api_key = parts[2] obj_name", "(type_, action)] = 0 with self.task.db_session(commit=False) as session: # limit", 
"item.get(\"position\") or {} gps_age = position.get(\"age\", 0) timestamp = item.get(\"timestamp\")", "ichnaea.data import _map_content_enabled from ichnaea.models import ( ApiKey, BlueObservation, BlueReport,", "export task to process the data in each queue. \"\"\"", "for obs in observations[datatype]: # group by sharded queue shard_id", "import json import re import time from urllib.parse import urlparse", "contain enough or old enough data schedules an async export", "return {} class InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform", "for item in queue_items: # preprocess items and extract set", "field_map): value = {} for spec in field_map: if isinstance(spec,", "range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success = True except", "for key in api_keys if key] if keys: columns =", "S3Exporter(ReportExporter): _retriable = ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self,", "self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known, metrics): for api_key, key_metrics", "= parts[1] api_key = parts[2] obj_name = path.format( source=source, api_key=api_key,", "data = data_queue.dequeue() grouped = defaultdict(list) for item in data:", "\"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\",", "item_source = item.get(key_map[0]) if item_source: value = self._map_dict(item_source, field_map) if", "self.task.db_session(commit=False) as session: # limit database session to get API", "queue = config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for config in export_configs:", "key_metrics.items(): if not count: continue type_, action = name.split(\"_\") if", "+ 1)) if success: METRICS.incr(\"data.export.batch\", 
tags=self.stats_tags) break if success and", "(IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform() def send(self, queue_items): api_keys", "\"report\" tags = api_tag else: suffix = \"observation\" tags =", "= requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0,", "class S3Exporter(ReportExporter): _retriable = ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def", "target section id # *_map maps fields inside the section", "export_configs: if config.allowed(api_key, source): queue_key = config.queue_key(api_key, source) queue =", "(\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id =", "= item.get(\"timestamp\") if timestamp: # turn timestamp into GPS timestamp", "in (\"blue\", \"cell\", \"wifi\"): for record in report.get(type_, ()): record[\"age\"]", "\"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"),", "wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map = [ (\"macAddress\", \"mac\"), \"age\",", "enough data or # old enough data to be ready", "+= 1 continue # combine general and specific report data", "self._retriable: success = False time.sleep(self._retry_wait * (i ** 2 +", "checks those queues and if they contain enough or old", "if value: if key_map[1] is None: report.update(value) else: report[key_map[1]] =", "import botocore.exceptions import markus import redis.exceptions import requests import requests.exceptions", "= (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform() def send(self, queue_items):", "export_task): redis_client = self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue()", "key names start without a leading 
slash path = path.lstrip(\"/\")", "self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items): raise NotImplementedError() class DummyExporter(ReportExporter): def", "\"age\", (\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\", \"cell\") cell_map = [", "queue_id, values in queued_obs.items(): # enqueue values for each queue", "values: report[key_map[1]] = values return values def __call__(self, item): report", "and trigger exception for bad responses # this causes the", "a simple string can be specified instead # of a", "METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:success\"] ) except Exception: METRICS.incr( \"data.export.upload\",", "\"ichnaea\", } response = requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5", "\"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)] = 0 with self.task.db_session(commit=False) as", "for lat, lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id,", "\"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ] def _map_dict(self, item_source, field_map):", "now contain enough data or # old enough data to", "items = [] for item in queue_items: # preprocess items", "ichnaea.models import ( ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard,", "values for each queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def", "self.queue = config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\" + self.config.name] @staticmethod", "data for the same key, ignore existing = observations[name].get(item_key) if", "i in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success =", "\"internal\": InternalExporter, \"s3\": S3Exporter, } exporter_type = exporter_types.get(config.schema) if 
exporter_type", "{} for spec in field_map: if isinstance(spec, tuple): source, target", "= [ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"),", "values return values def __call__(self, item): report = {} self._parse_dict(item,", "self.task = task self.config = config self.queue_key = queue_key self.queue", "ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in rows:", "into one item_obs = obs_cls.combine(report, item_report) item_key = item_obs.unique_key #", "+= uuid.uuid1().hex + \".json.gz\" try: data = util.encode_gzip( json.dumps({\"items\": reports}).encode(),", "values def __call__(self, item): report = {} self._parse_dict(item, report, self.position_id,", "DummyExporter(ReportExporter): def send(self, queue_items): pass class GeosubmitExporter(ReportExporter): _retriable = (IOError,", "report[key_map[1]] = values return values def __call__(self, item): report =", "metrics[api_key][name + \"_upload\"] += len(obs[name]) any_data = True metrics[api_key][name +", "def __call__(self): queue_items = self.queue.dequeue() if not queue_items: return success", "ExportConfig.all(session) with self.task.redis_pipeline() as pipe: for (api_key, source), items in", "reports = [item[\"report\"] for item in queue_items] _, bucketname, path", "external transfers (backup, forward to partners) to the internal submit", "item.get(key_map[0]) if item_source: value = self._map_dict(item_source, field_map) if value: if", "re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics() class IncomingQueue(object): \"\"\" The incoming", "gps_age if blues or cells or wifis: return report return", "in each queue. 
\"\"\" def __init__(self, task): self.task = task", "= position.get(\"age\", 0) timestamp = item.get(\"timestamp\") if timestamp: # turn", "in api_keys if key] if keys: columns = ApiKey.__table__.c rows", "value = {} item_source = item.get(key_map[0]) if item_source: value =", "if existing is not None and existing.better(item_obs): continue observations[name][item_key] =", "\"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter, } exporter_type", "\"cell\", \"wifi\"): for action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_,", "\"update_wifi_\"), ): queued_obs = defaultdict(list) for obs in observations[datatype]: #", "causes the task to be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags +", "age fields to be relative to GPS time for type_", "+ shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values in queued_obs.items(): # enqueue", "type_ in (\"report\", \"blue\", \"cell\", \"wifi\"): for action in (\"drop\",", "\"_upload\"] += len(obs[name]) any_data = True metrics[api_key][name + \"_drop\"] +=", "in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success = True", "in config.partitions(redis_client): queue = config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name, queue_key)", "defaultdict(set) for lat, lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for", "for i in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success", "\"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id = (\"wifiAccessPoints\", \"wifi\")", "} return (obs, malformed) def process_datamap(self, pipe, positions): grids =", "api_key and api_key in api_keys_known: api_tag = [\"key:%s\" % api_key]", "leading slash path = path.lstrip(\"/\") if 
not path.endswith(\"/\"): path +=", "in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)] = 0 with", "key, ignore existing = observations[name].get(item_key) if existing is not None", "except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] ) raise class", "in queued_obs.items(): # enqueue values for each queue queue =", "keys item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key", "None: exporter_type(task, config, queue_key)() def __call__(self): queue_items = self.queue.dequeue() if", "can be specified instead # of a two-tuple position_id =", "if not path.endswith(\"/\"): path += \"/\" year, month, day =", "queue_items): raise NotImplementedError() class DummyExporter(ReportExporter): def send(self, queue_items): pass class", "internal submit v1 schema used in our own database models.", "task self.config = config self.queue_key = queue_key self.queue = config.queue(queue_key,", "= re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics() class IncomingQueue(object): \"\"\" The", "data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} ) with", "import util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics() class", "= [(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\", \"cell\")", "defaultdict(list) for item in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"],", "= report_cls.create(**item) if item_report is None: malformed[name] += 1 continue", "_retries = 3 _retry_wait = 1.0 def __init__(self, task, config,", "(\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\", \"cell\") cell_map = [ (\"radioType\",", "in (\"report\", \"blue\", \"cell\", \"wifi\"): for action in (\"drop\", 
\"upload\"):", "are equal, a simple string can be specified instead #", "3 _retry_wait = 1.0 def __init__(self, task, config, queue_key): self.task", "api_keys_known: api_tag = [\"key:%s\" % api_key] for name, count in", "= self._map_dict(item_source, field_map) if value: if key_map[1] is None: report.update(value)", "+ \"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1 if any_data:", "response = requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers,", "schema used in our own database models. \"\"\" # *_id", "success = False time.sleep(self._retry_wait * (i ** 2 + 1))", "The incoming queue contains the data collected in the web", "= ExportConfig.get(session, name) exporter_types = { \"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter,", "obs_cls in ( (\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\",", "boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\")", "= markus.get_metrics() class IncomingQueue(object): \"\"\" The incoming queue contains the", "item_obs = obs_cls.combine(report, item_report) item_key = item_obs.unique_key # if we", "if item_source: value = self._map_dict(item_source, field_map) if value: if key_map[1]", "= (\"position\", None) position_map = [ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"),", "is not None: value[target] = source_value return value def _parse_dict(self,", "GPS timestamp report[\"timestamp\"] = timestamp - gps_age if gps_age: #", "used in view code and external transfers (backup, forward to", "{} self._parse_dict(item, report, self.position_id, self.position_map) blues = self._parse_list(item, report, self.blue_id,", "config in export_configs: # Check all queues if they now", "= self.queue.dequeue() if not queue_items: return success = False for", "WifiReport, 
WifiShard, ) from ichnaea.models.content import encode_datamap_grid from ichnaea import", "[\"status:failure\"] ) raise class InternalTransform(object): \"\"\" This maps the geosubmit", "+ [\"status:failure\"] ) raise class InternalTransform(object): \"\"\" This maps the", "** 2 + 1)) if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if", "redis_client) queue.enqueue(items, pipe=pipe) for config in export_configs: # Check all", "1 with self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations) if _map_content_enabled and", "not None: exporter_type(task, config, queue_key)() def __call__(self): queue_items = self.queue.dequeue()", "else: source = spec target = spec source_value = item_source.get(source)", "report.update(value) else: report[key_map[1]] = value return value def _parse_list(self, item,", "position_id = (\"position\", None) position_map = [ (\"latitude\", \"lat\"), (\"longitude\",", "(\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ] def _map_dict(self, item_source, field_map): value", "(\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"),", "sharded queue shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix +", "= [] if api_key and api_key in api_keys_known: api_tag =", "config, queue_key)() def __call__(self): queue_items = self.queue.dequeue() if not queue_items:", "report_cls, obs_cls in ( (\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation),", "self.send(queue_items) success = True except self._retriable: success = False time.sleep(self._retry_wait", "# this causes the task to be re-tried METRICS.incr( \"data.export.upload\",", "if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key in api_keys: metrics[api_key] =", "[]} for item in items: api_key = item[\"api_key\"] report =", "import defaultdict import json 
import re import time from urllib.parse", "the data in each queue. \"\"\" def __init__(self, task): self.task", "break if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items):", "task.db_session(commit=False) as session: config = ExportConfig.get(session, name) exporter_types = {", "if value: values.append(value) if values: report[key_map[1]] = values return values", "s3 = boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data,", "if isinstance(spec, tuple): source, target = spec else: source =", "set of API keys item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]: items.append(item)", "source_value return value def _parse_dict(self, item, report, key_map, field_map): value", "self.queue_key]) def send(self, queue_items): raise NotImplementedError() class DummyExporter(ReportExporter): def send(self,", "spec in field_map: if isinstance(spec, tuple): source, target = spec", "= bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:success\"]", "day=day ) obj_name += uuid.uuid1().hex + \".json.gz\" try: data =", "_retriable = (IOError,) _retries = 3 _retry_wait = 1.0 def", "pipe: self.queue_observations(pipe, observations) if _map_content_enabled and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known,", "for lat, lon in positions: if lat is not None", "name, queue_key): with task.db_session(commit=False) as session: config = ExportConfig.get(session, name)", "\"s3\": S3Exporter, } exporter_type = exporter_types.get(config.schema) if exporter_type is not", "collections import defaultdict import json import re import time from", "json import re import time from urllib.parse import urlparse import", "pipe=pipe) for config in export_configs: # Check all queues if", "value: if 
key_map[1] is None: report.update(value) else: report[key_map[1]] = value", "type_ in (\"blue\", \"cell\", \"wifi\"): for record in report.get(type_, ()):", "for record in report.get(type_, ()): record[\"age\"] = record.get(\"age\", 0) -", "if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline()", "# Normalize age fields to be relative to GPS time", "= self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key in api_keys:", "(\"blue\", \"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] +=", "key_metrics in metrics.items(): api_tag = [] if api_key and api_key", "= [\"key:%s\" % api_key] for name, count in key_metrics.items(): if", "source = parts[1] api_key = parts[2] obj_name = path.format( source=source,", "api_keys_known = set() metrics = {} items = [] for", "= { \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", } response", "continue observations[name][item_key] = item_obs obs = { \"blue\": observations[\"blue\"].values(), \"cell\":", "year, month, day = util.utcnow().timetuple()[:3] # strip away queue prefix", "if not queue_items: return success = False for i in", "[ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\",", "__init__(self, task, config, queue_key): self.task = task self.config = config", "It distributes the data into the configured export queues, checks", "bucketname, path = urlparse(self.config.url)[:3] # s3 key names start without", "with self.task.db_session(commit=False) as session: # limit database session to get", "[\"type:%s\" % type_] + api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action), count,", "= item[\"report\"] obs, malformed_obs = self.process_report(report) any_data = False for", "in 
data[name]: # validate the blue/cell/wifi specific fields item_report =", "import re import time from urllib.parse import urlparse import uuid", "\"User-Agent\": \"ichnaea\", } response = requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(),", "\"\"\" The incoming queue contains the data collected in the", "ignore metadata reports = [item[\"report\"] for item in queue_items] headers", "0 observations[name] = {} if data.get(name): for item in data[name]:", "in positions: if lat is not None and lon is", "metadata reports = [item[\"report\"] for item in queue_items] headers =", "shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json())", "(obs, malformed) def process_datamap(self, pipe, positions): grids = set() for", "config.allowed(api_key, source): queue_key = config.queue_key(api_key, source) queue = config.queue(queue_key, redis_client)", "for name, count in key_metrics.items(): if not count: continue type_,", "ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report,", "(\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\", ] blue_id = (\"bluetoothBeacons\",", "spec source_value = item_source.get(source) if source_value is not None: value[target]", "in rows: api_keys_known.add(row.valid_key) positions = [] observations = {\"blue\": [],", "[\"status:success\"] ) except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] )", "item_source, field_map): value = {} for spec in field_map: if", "continue # combine general and specific report data into one", "if they now contain enough data or # old enough", "BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation,", "enough data to be ready for processing. 
for queue_key in", "is None: malformed[name] += 1 continue # combine general and", "\"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id", "obj_name = path.format( source=source, api_key=api_key, year=year, month=month, day=day ) obj_name", "queue_items: return success = False for i in range(self._retries): try:", "_parse_dict(self, item, report, key_map, field_map): value = {} item_source =", "[\"key:%s\" % api_key] for name, count in key_metrics.items(): if not", "= value return value def _parse_list(self, item, report, key_map, field_map):", "wifis: return report return {} class InternalExporter(ReportExporter): _retriable = (IOError,", "class DummyExporter(ReportExporter): def send(self, queue_items): pass class GeosubmitExporter(ReportExporter): _retriable =", "for config in export_configs: # Check all queues if they", "headers=headers, timeout=60.0, ) # log upload_status and trigger exception for", "send(self, queue_items): raise NotImplementedError() class DummyExporter(ReportExporter): def send(self, queue_items): pass", "def __init__(self, task): self.task = task def __call__(self, export_task): redis_client", "from ichnaea.models import ( ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport,", "the single entrypoint from which all other data pipelines get", "raise NotImplementedError() class DummyExporter(ReportExporter): def send(self, queue_items): pass class GeosubmitExporter(ReportExporter):", "# of a two-tuple position_id = (\"position\", None) position_map =", "not None: value[target] = source_value return value def _parse_dict(self, item,", "if we have better data for the same key, ignore", "util.utcnow().timetuple()[:3] # strip away queue prefix again parts = self.queue_key.split(\":\")", "ichnaea import util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics()", "import markus import redis.exceptions import requests 
import requests.exceptions from sqlalchemy", "boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self, queue_items): # ignore metadata reports", "% (type_, action)] = 0 with self.task.db_session(commit=False) as session: #", "time.sleep(self._retry_wait * (i ** 2 + 1)) if success: METRICS.incr(\"data.export.batch\",", "metadata reports = [item[\"report\"] for item in queue_items] _, bucketname,", "malformed = {} observations = {} for name, report_cls, obs_cls", "id # *_map maps fields inside the section from source", "\"pressure\", \"speed\", \"source\", ] blue_id = (\"bluetoothBeacons\", \"blue\") blue_map =", "api_tag = [\"key:%s\" % api_key] for name, count in key_metrics.items():", "def send(self, queue_items): api_keys = set() api_keys_known = set() metrics", "the section from source to target id # if the", "__call__(self, export_task): redis_client = self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data =", "target id # if the names are equal, a simple", "(i ** 2 + 1)) if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break", "\"source\", ] blue_id = (\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\", \"mac\"),", "(IOError, requests.exceptions.RequestException) def send(self, queue_items): # ignore metadata reports =", "WifiObservation), ): malformed[name] = 0 observations[name] = {} if data.get(name):", "{\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} ) with self.task.db_session(commit=False) as session: export_configs", "= timestamp - gps_age if gps_age: # Normalize age fields", "api_key = item[\"api_key\"] report = item[\"report\"] obs, malformed_obs = self.process_report(report)", "{} observations = {} for name, report_cls, obs_cls in (", "metrics[api_key][name + \"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1 if", "encode_datamap_grid from ichnaea import util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS", 
"session: config = ExportConfig.get(session, name) exporter_types = { \"dummy\": DummyExporter,", "queue_items: # preprocess items and extract set of API keys", "compresslevel=5 ), headers=headers, timeout=60.0, ) # log upload_status and trigger", "This maps the geosubmit v2 schema used in view code", "\"cell\") cell_map = [ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"),", "observations[name] = {} if data.get(name): for item in data[name]: #", "\"ta\"), ] wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map = [ (\"macAddress\",", "(api_key, source), items in grouped.items(): for config in export_configs: if", "# combine general and specific report data into one item_obs", "item_obs obs = { \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(),", "send(self, queue_items): pass class GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException) def", "without a leading slash path = path.lstrip(\"/\") if not path.endswith(\"/\"):", "# group by sharded queue shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id", "): queued_obs = defaultdict(list) for obs in observations[datatype]: # group", "target = spec else: source = spec target = spec", "old enough data schedules an async export task to process", "success = False for i in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\",", "None and existing.better(item_obs): continue observations[name][item_key] = item_obs obs = {", "API keys item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for", "value return value def _parse_list(self, item, report, key_map, field_map): values", "1)) if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if success and self.queue.ready():", "def process_datamap(self, pipe, positions): grids = set() for lat, 
lon", ") obj_name += uuid.uuid1().hex + \".json.gz\" try: data = util.encode_gzip(", "api_keys.add(item[\"api_key\"]) for api_key in api_keys: metrics[api_key] = {} for type_", "(\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\",", "class IncomingQueue(object): \"\"\" The incoming queue contains the data collected", "self._map_dict(item_source, field_map) if value: if key_map[1] is None: report.update(value) else:", "[ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\",", "field_map: if isinstance(spec, tuple): source, target = spec else: source", "item, report, key_map, field_map): values = [] for value_item in", "{ \"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter, }", "__call__(self): queue_items = self.queue.dequeue() if not queue_items: return success =", "export(task, name, queue_key): with task.db_session(commit=False) as session: config = ExportConfig.get(session,", "database models. 
\"\"\" # *_id maps a source section id", "boto3.exceptions import botocore.exceptions import markus import redis.exceptions import requests import", "S3Exporter, } exporter_type = exporter_types.get(config.schema) if exporter_type is not None:", "item in queue_items] _, bucketname, path = urlparse(self.config.url)[:3] # s3", "item_report is None: malformed[name] += 1 continue # combine general", "queue_items): # ignore metadata reports = [item[\"report\"] for item in", "api_keys_known.add(row.valid_key) positions = [] observations = {\"blue\": [], \"cell\": [],", "observations[datatype]: # group by sharded queue shard_id = shard_model.shard_id(getattr(obs, shard_key))", "data into the configured export queues, checks those queues and", "preprocess items and extract set of API keys item[\"report\"] =", "0 with self.task.db_session(commit=False) as session: # limit database session to", "{ \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return (obs,", "values.append(value) if values: report[key_map[1]] = values return values def __call__(self,", "InternalExporter, \"s3\": S3Exporter, } exporter_type = exporter_types.get(config.schema) if exporter_type is", "[item[\"report\"] for item in queue_items] _, bucketname, path = urlparse(self.config.url)[:3]", "item in data[name]: # validate the blue/cell/wifi specific fields item_report", "queued_obs = defaultdict(list) for obs in observations[datatype]: # group by", "to target id # if the names are equal, a", "any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline() as", "sqlalchemy import select import sqlalchemy.exc from ichnaea.data import _map_content_enabled from", "(\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", WifiShard,", ") raise class InternalTransform(object): \"\"\" This maps the 
geosubmit v2", "shard_key)) queue_id = queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values", "% (suffix, action), count, tags=tags) def process_report(self, data): report =", "with self.task.db_session(commit=False) as session: export_configs = ExportConfig.all(session) with self.task.redis_pipeline() as", "columns = ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row", "botocore.exceptions.BotoCoreError, ) def send(self, queue_items): # ignore metadata reports =", "an async export task to process the data in each", "BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation), ): malformed[name] =", "+ \".json.gz\" try: data = util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 )", "return success = False for i in range(self._retries): try: with", "= spec else: source = spec target = spec source_value", "in observations[datatype]: # group by sharded queue shard_id = shard_model.shard_id(getattr(obs,", "(\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\", \"signal\")] cell_id", "item in items: api_key = item[\"api_key\"] report = item[\"report\"] obs,", "= spec target = spec source_value = item_source.get(source) if source_value", "values in queued_obs.items(): # enqueue values for each queue queue", "] def _map_dict(self, item_source, field_map): value = {} for spec", "queue_items] _, bucketname, path = urlparse(self.config.url)[:3] # s3 key names", "exporter_type(task, config, queue_key)() def __call__(self): queue_items = self.queue.dequeue() if not", "lat, lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values", "those queues and if they contain enough or old enough", "None: grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set) for lat, lon in", "= 1.0 def __init__(self, task, config, 
queue_key): self.task = task", "\".json.gz\" try: data = util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3", "export_configs: # Check all queues if they now contain enough", "in key_metrics.items(): if not count: continue type_, action = name.split(\"_\")", "{\"blue\": [], \"cell\": [], \"wifi\": []} for item in items:", "item.get(\"timestamp\") if timestamp: # turn timestamp into GPS timestamp report[\"timestamp\"]", "{} for name, report_cls, obs_cls in ( (\"blue\", BlueReport, BlueObservation),", "emit_metrics(self, api_keys_known, metrics): for api_key, key_metrics in metrics.items(): api_tag =", "names start without a leading slash path = path.lstrip(\"/\") if", "observations = {\"blue\": [], \"cell\": [], \"wifi\": []} for item", "# preprocess items and extract set of API keys item[\"report\"]", "of API keys item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"])", "api_key, key_metrics in metrics.items(): api_tag = [] if api_key and", "METRICS = markus.get_metrics() class IncomingQueue(object): \"\"\" The incoming queue contains", "(\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation), ): malformed[name] = 0", "config, queue_key): self.task = task self.config = config self.queue_key =", "\"wifi\"): for record in report.get(type_, ()): record[\"age\"] = record.get(\"age\", 0)", "uuid import boto3 import boto3.exceptions import botocore.exceptions import markus import", "report is None: return ({}, {}) malformed = {} observations", "tags=self.stats_tags + [\"status:failure\"] ) raise class InternalTransform(object): \"\"\" This maps", "used in our own database models. 
\"\"\" # *_id maps", "value = {} for spec in field_map: if isinstance(spec, tuple):", "wifi_map = [ (\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"),", "metrics) def queue_observations(self, pipe, observations): for datatype, shard_model, shard_key, queue_prefix", "incoming queue contains the data collected in the web application.", "( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self, queue_items): # ignore", "specified instead # of a two-tuple position_id = (\"position\", None)", "[\"status:%s\" % response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter): _retriable = (", "), headers=headers, timeout=60.0, ) # log upload_status and trigger exception", "obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:success\"] ) except", "CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation), ): malformed[name] = 0 observations[name]", "s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( \"data.export.upload\", tags=self.stats_tags", "self.config = config self.queue_key = queue_key self.queue = config.queue(queue_key, task.redis_client)", "submit v1 schema used in our own database models. \"\"\"", "for item in items: api_key = item[\"api_key\"] report = item[\"report\"]", "upload_status and trigger exception for bad responses # this causes", "in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values in shards.items():", "field_map) if value: if key_map[1] is None: report.update(value) else: report[key_map[1]]", "ExportConfig, Report, WifiObservation, WifiReport, WifiShard, ) from ichnaea.models.content import encode_datamap_grid", "to process the data in each queue. 
\"\"\" def __init__(self,", "reports = [item[\"report\"] for item in queue_items] headers = {", "for name, report_cls, obs_cls in ( (\"blue\", BlueReport, BlueObservation), (\"cell\",", "lon is not None: grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set) for", "item[\"report\"]} ) with self.task.db_session(commit=False) as session: export_configs = ExportConfig.all(session) with", "tags=tags) def process_report(self, data): report = Report.create(**data) if report is", "\"data.export.upload\", tags=self.stats_tags + [\"status:success\"] ) except Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags", "session to get API keys keys = [key for key", "time from urllib.parse import urlparse import uuid import boto3 import", "= config.queue_key(api_key, source) queue = config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for", "not queue_items: return success = False for i in range(self._retries):", "for action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" % (type_, action)] =", "malformed[name] = 0 observations[name] = {} if data.get(name): for item", "into GPS timestamp report[\"timestamp\"] = timestamp - gps_age if gps_age:", "import ( ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap,", "self.queue_key = queue_key self.queue = config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\"", "data_queue = self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped = defaultdict(list) for", "as pipe: self.queue_observations(pipe, observations) if _map_content_enabled and positions: self.process_datamap(pipe, positions)", "if they contain enough or old enough data schedules an", "# *_map maps fields inside the section from source to", "send(self, queue_items): api_keys = set() api_keys_known = set() metrics =", "reports}).encode(), compresslevel=7 ) s3 = boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj", "count in key_metrics.items(): 
if not count: continue type_, action =", "\"mac\"), \"age\", (\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\", \"cell\") cell_map =", "def export(task, name, queue_key): with task.db_session(commit=False) as session: config =", "observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return (obs, malformed) def", "self.task = task def __call__(self, export_task): redis_client = self.task.redis_client data_queue", "suffix = \"report\" tags = api_tag else: suffix = \"observation\"", "type_] + api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action), count, tags=tags) def", "re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\" % response.status_code], ) response.raise_for_status()", "else: suffix = \"observation\" tags = [\"type:%s\" % type_] +", "flags=re.UNICODE) METRICS = markus.get_metrics() class IncomingQueue(object): \"\"\" The incoming queue", "queue. \"\"\" def __init__(self, task): self.task = task def __call__(self,", "[] observations = {\"blue\": [], \"cell\": [], \"wifi\": []} for", "self.queue_key.split(\":\") source = parts[1] api_key = parts[2] obj_name = path.format(", "BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation), ): malformed[name]", "specific fields item_report = report_cls.create(**item) if item_report is None: malformed[name]", "to be relative to GPS time for type_ in (\"blue\",", "exporter_type is not None: exporter_type(task, config, queue_key)() def __call__(self): queue_items", "id to a target section id # *_map maps fields", "\"snr\"), (\"signalStrength\", \"signal\"), ] def _map_dict(self, item_source, field_map): value =", "raise class InternalTransform(object): \"\"\" This maps the geosubmit v2 schema", "= config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown()", 
"observations[name].get(item_key) if existing is not None and existing.better(item_obs): continue observations[name][item_key]", "for (api_key, source), items in grouped.items(): for config in export_configs:", "obj_name += uuid.uuid1().hex + \".json.gz\" try: data = util.encode_gzip( json.dumps({\"items\":", "CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard, ) from", "( (\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\",", "have better data for the same key, ignore existing =", "item, report, key_map, field_map): value = {} item_source = item.get(key_map[0])", "for row in rows: api_keys_known.add(row.valid_key) positions = [] observations =", "for config in export_configs: if config.allowed(api_key, source): queue_key = config.queue_key(api_key,", "self.wifi_map) position = item.get(\"position\") or {} gps_age = position.get(\"age\", 0)", "lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values in", "obs in observations[datatype]: # group by sharded queue shard_id =", "None: return ({}, {}) malformed = {} observations = {}", "api_keys = set() api_keys_known = set() metrics = {} items", "= api_tag else: suffix = \"observation\" tags = [\"type:%s\" %", "[item[\"report\"] for item in queue_items] headers = { \"Content-Encoding\": \"gzip\",", "if data.get(name): for item in data[name]: # validate the blue/cell/wifi", "key] if keys: columns = ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys))", "grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} ) with self.task.db_session(commit=False)", "strip away queue prefix again parts = self.queue_key.split(\":\") source =", "queue = config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name, queue_key) if 
data_queue.ready():", "headers = { \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\", }", "in queue_items: # preprocess items and extract set of API", "if type_ == \"report\": suffix = \"report\" tags = api_tag", "* (i ** 2 + 1)) if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags)", "two-tuple position_id = (\"position\", None) position_map = [ (\"latitude\", \"lat\"),", "report, key_map, field_map): value = {} item_source = item.get(key_map[0]) if", "self._map_dict(value_item, field_map) if value: values.append(value) if values: report[key_map[1]] = values", "util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics() class IncomingQueue(object):", "import select import sqlalchemy.exc from ichnaea.data import _map_content_enabled from ichnaea.models", "# Check all queues if they now contain enough data", "data[name]: # validate the blue/cell/wifi specific fields item_report = report_cls.create(**item)", "cell_map = [ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\",", "\"cell\": [], \"wifi\": []} for item in items: api_key =", "ContentType=\"application/json\") METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:success\"] ) except Exception: METRICS.incr(", "item[\"report\"] obs, malformed_obs = self.process_report(report) any_data = False for name", "if api_key and api_key in api_keys_known: api_tag = [\"key:%s\" %", "timestamp = item.get(\"timestamp\") if timestamp: # turn timestamp into GPS", "= self._parse_list(item, report, self.blue_id, self.blue_map) cells = self._parse_list(item, report, self.cell_id,", "+ \"_upload\"] += len(obs[name]) any_data = True metrics[api_key][name + \"_drop\"]", "= False for i in range(self._retries): try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags):", "ContentEncoding=\"gzip\", ContentType=\"application/json\") 
METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:success\"] ) except Exception:", "if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] += len(obs[name]) any_data =", "or cells or wifis: return report return {} class InternalExporter(ReportExporter):", "self.emit_metrics(api_keys_known, metrics) def queue_observations(self, pipe, observations): for datatype, shard_model, shard_key,", "item_key = item_obs.unique_key # if we have better data for", "= queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values in queued_obs.items():", "if timestamp: # turn timestamp into GPS timestamp report[\"timestamp\"] =", "key_map, field_map): values = [] for value_item in item.get(key_map[0], ()):", "{} if data.get(name): for item in data[name]: # validate the", "for value_item in item.get(key_map[0], ()): value = self._map_dict(value_item, field_map) if", "be specified instead # of a two-tuple position_id = (\"position\",", "# log upload_status and trigger exception for bad responses #", "which all other data pipelines get their data. It distributes", "requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0, )", "blue_id = (\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\",", "report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline() as pipe: self.queue_observations(pipe,", "processing. 
for queue_key in config.partitions(redis_client): queue = config.queue(queue_key, redis_client) if", "1.0 def __init__(self, task, config, queue_key): self.task = task self.config", "= spec source_value = item_source.get(source) if source_value is not None:", "report = item[\"report\"] obs, malformed_obs = self.process_report(report) any_data = False", "data into one item_obs = obs_cls.combine(report, item_report) item_key = item_obs.unique_key", "schema used in view code and external transfers (backup, forward", "task to process the data in each queue. \"\"\" def", "= defaultdict(list) for item in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\":", "self.task.db_session(commit=False) as session: export_configs = ExportConfig.all(session) with self.task.redis_pipeline() as pipe:", "distributes the data into the configured export queues, checks those", "session: export_configs = ExportConfig.all(session) with self.task.redis_pipeline() as pipe: for (api_key,", "set() metrics = {} items = [] for item in", "timestamp - gps_age if gps_age: # Normalize age fields to", "blues or cells or wifis: return report return {} class", "shard_id, values in shards.items(): queue = self.task.app.data_queues[\"update_datamap_\" + shard_id] queue.enqueue(list(values),", "if blues or cells or wifis: return report return {}", "= [\"key:\" + self.config.name] @staticmethod def export(task, name, queue_key): with", "class ReportExporter(object): _retriable = (IOError,) _retries = 3 _retry_wait =", "def __init__(self, task, config, queue_key): self.task = task self.config =", "\"cell\", \"wifi\"): for record in report.get(type_, ()): record[\"age\"] = record.get(\"age\",", "report = Report.create(**data) if report is None: return ({}, {})", "key_map, field_map): value = {} item_source = item.get(key_map[0]) if item_source:", "bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( 
\"data.export.upload\", tags=self.stats_tags + [\"status:success\"] )", "spec target = spec source_value = item_source.get(source) if source_value is", "json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0, ) # log upload_status", "report_cls.create(**item) if item_report is None: malformed[name] += 1 continue #", "\"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] ) raise class InternalTransform(object): \"\"\" This", "= defaultdict(list) for obs in observations[datatype]: # group by sharded", "with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success = True except self._retriable: success", "obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( \"data.export.upload\", tags=self.stats_tags +", "tags=self.stats_tags) break if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self,", "lon)) for shard_id, values in shards.items(): queue = self.task.app.data_queues[\"update_datamap_\" +", "queue_key self.queue = config.queue(queue_key, task.redis_client) self.stats_tags = [\"key:\" + self.config.name]", "data_queue.dequeue() grouped = defaultdict(list) for item in data: grouped[(item[\"api_key\"], item.get(\"source\",", "of a two-tuple position_id = (\"position\", None) position_map = [", "IncomingQueue(object): \"\"\" The incoming queue contains the data collected in", "config self.queue_key = queue_key self.queue = config.queue(queue_key, task.redis_client) self.stats_tags =", "grouped = defaultdict(list) for item in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append(", "return report return {} class InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError,", "forward to partners) to the internal submit v1 schema used", "\"wifi\": []} for item in items: api_key = item[\"api_key\"] report", 
"urlparse(self.config.url)[:3] # s3 key names start without a leading slash", "= False for name in (\"blue\", \"cell\", \"wifi\"): if obs.get(name):", "task): self.task = task def __call__(self, export_task): redis_client = self.task.redis_client", "= path.lstrip(\"/\") if not path.endswith(\"/\"): path += \"/\" year, month,", "queue_key = config.queue_key(api_key, source) queue = config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe)", "queue.ready(): export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): _retriable =", "source) queue = config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for config in", "value = self._map_dict(item_source, field_map) if value: if key_map[1] is None:", "ignore metadata reports = [item[\"report\"] for item in queue_items] _,", "queue.enqueue(items, pipe=pipe) for config in export_configs: # Check all queues", "process the data in each queue. \"\"\" def __init__(self, task):", "(\"wifi\", WifiReport, WifiObservation), ): malformed[name] = 0 observations[name] = {}", "record[\"age\"] = record.get(\"age\", 0) - gps_age if blues or cells", "for name in (\"blue\", \"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name", "api_tag = [] if api_key and api_key in api_keys_known: api_tag", "a leading slash path = path.lstrip(\"/\") if not path.endswith(\"/\"): path", "own database models. \"\"\" # *_id maps a source section", "to be ready for processing. 
for queue_key in config.partitions(redis_client): queue", "for queue_key in config.partitions(redis_client): queue = config.queue(queue_key, redis_client) if queue.ready():", "else: report[key_map[1]] = value return value def _parse_list(self, item, report,", "gps_age: # Normalize age fields to be relative to GPS", "api_keys if key] if keys: columns = ApiKey.__table__.c rows =", "( (\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation),", "# if the names are equal, a simple string can", "to a target section id # *_map maps fields inside", "(\"signalStrength\", \"signal\"), ] def _map_dict(self, item_source, field_map): value = {}", "\"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\",", "count, tags=tags) def process_report(self, data): report = Report.create(**data) if report", "data): report = Report.create(**data) if report is None: return ({},", "geosubmit v2 schema used in view code and external transfers", "into the configured export queues, checks those queues and if", "fields to be relative to GPS time for type_ in", "(\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\",", "\"\"\" # *_id maps a source section id to a", "({}, {}) malformed = {} observations = {} for name,", "obs = { \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), }", "set() api_keys_known = set() metrics = {} items = []", "row in rows: api_keys_known.add(row.valid_key) positions = [] observations = {\"blue\":", "except self._retriable: success = False time.sleep(self._retry_wait * (i ** 2", "\"cellid\", \"update_cell_\"), (\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs = defaultdict(list)", "\"\"\" def __init__(self, task): self.task = task def 
__call__(self, export_task):", "0) timestamp = item.get(\"timestamp\") if timestamp: # turn timestamp into", "= (\"cellTowers\", \"cell\") cell_map = [ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"),", "(\"blue\", \"cell\", \"wifi\"): for record in report.get(type_, ()): record[\"age\"] =", "report, key_map, field_map): values = [] for value_item in item.get(key_map[0],", "METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def", "metrics[api_key] = {} for type_ in (\"report\", \"blue\", \"cell\", \"wifi\"):", "_retry_wait = 1.0 def __init__(self, task, config, queue_key): self.task =", "queued_obs.items(): # enqueue values for each queue queue = self.task.app.data_queues[queue_id]", "self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped = defaultdict(list)", "self.wifi_id, self.wifi_map) position = item.get(\"position\") or {} gps_age = position.get(\"age\",", "InternalTransform() def send(self, queue_items): api_keys = set() api_keys_known = set()", "export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): _retriable = (IOError,)", "log upload_status and trigger exception for bad responses # this", "queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known, metrics): for api_key, key_metrics in", "queue_items] headers = { \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\": \"ichnaea\",", "return (obs, malformed) def process_datamap(self, pipe, positions): grids = set()", "{} item_source = item.get(key_map[0]) if item_source: value = self._map_dict(item_source, field_map)", "in api_keys: metrics[api_key] = {} for type_ in (\"report\", \"blue\",", "or old enough data schedules an async export task to", "positions) self.emit_metrics(api_keys_known, metrics) def 
queue_observations(self, pipe, observations): for datatype, shard_model,", "isinstance(spec, tuple): source, target = spec else: source = spec", "markus import redis.exceptions import requests import requests.exceptions from sqlalchemy import", "{} for type_ in (\"report\", \"blue\", \"cell\", \"wifi\"): for action", "year=year, month=month, day=day ) obj_name += uuid.uuid1().hex + \".json.gz\" try:", "obs, malformed_obs = self.process_report(report) any_data = False for name in", "if report is None: return ({}, {}) malformed = {}", "extract set of API keys item[\"report\"] = self.transform(item[\"report\"]) if item[\"report\"]:", "= task def __call__(self, export_task): redis_client = self.task.redis_client data_queue =", "data.get(name): for item in data[name]: # validate the blue/cell/wifi specific", "= {\"blue\": [], \"cell\": [], \"wifi\": []} for item in", "\"blue\", \"cell\", \"wifi\"): for action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\" %", "v2 schema used in view code and external transfers (backup,", "(\"cellTowers\", \"cell\") cell_map = [ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\",", "# validate the blue/cell/wifi specific fields item_report = report_cls.create(**item) if", "for bad responses # this causes the task to be", "from collections import defaultdict import json import re import time", "fields item_report = report_cls.create(**item) if item_report is None: malformed[name] +=", "partners) to the internal submit v1 schema used in our", "tags=self.stats_tags + [\"status:%s\" % response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter): _retriable", "config = ExportConfig.get(session, name) exporter_types = { \"dummy\": DummyExporter, \"geosubmit\":", "maps the geosubmit v2 schema used in view code and", "= [item[\"report\"] for item in queue_items] _, bucketname, path =", "queue_key) if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): 
_retriable = (IOError,) _retries", "= record.get(\"age\", 0) - gps_age if blues or cells or", "obs_cls.combine(report, item_report) item_key = item_obs.unique_key # if we have better", "shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values in shards.items(): queue =", "IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self, queue_items): # ignore metadata", "= InternalTransform() def send(self, queue_items): api_keys = set() api_keys_known =", "api_tag else: suffix = \"observation\" tags = [\"type:%s\" % type_]", "= parts[2] obj_name = path.format( source=source, api_key=api_key, year=year, month=month, day=day", "= ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self, queue_items): #", "configured export queues, checks those queues and if they contain", "None: malformed[name] += 1 continue # combine general and specific", "item): report = {} self._parse_dict(item, report, self.position_id, self.position_map) blues =", "(\"position\", None) position_map = [ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\",", "field_map): value = {} item_source = item.get(key_map[0]) if item_source: value", "the configured export queues, checks those queues and if they", "shards = defaultdict(set) for lat, lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat,", "= Report.create(**data) if report is None: return ({}, {}) malformed", "sqlalchemy.exc from ichnaea.data import _map_content_enabled from ichnaea.models import ( ApiKey,", "action), count, tags=tags) def process_report(self, data): report = Report.create(**data) if", "- gps_age if blues or cells or wifis: return report", "is not None: grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set) for lat,", "value[target] = source_value return value def _parse_dict(self, item, report, key_map,", "items in grouped.items(): for config in export_configs: if config.allowed(api_key, 
source):", "transform = InternalTransform() def send(self, queue_items): api_keys = set() api_keys_known", "= { \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return", "observations[name][item_key] = item_obs obs = { \"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(),", "= shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for", "\"observation\" tags = [\"type:%s\" % type_] + api_tag METRICS.incr(\"data.%s.%s\" %", "\"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\", ]", "send(self, queue_items): # ignore metadata reports = [item[\"report\"] for item", "util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3 = boto3.resource(\"s3\") bucket =", "\"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return (obs, malformed) def process_datamap(self,", "for each queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self,", "lon)].add(encode_datamap_grid(lat, lon)) for shard_id, values in shards.items(): queue = self.task.app.data_queues[\"update_datamap_\"", "= self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known, metrics): for api_key,", "be relative to GPS time for type_ in (\"blue\", \"cell\",", "timestamp into GPS timestamp report[\"timestamp\"] = timestamp - gps_age if", "for type_ in (\"report\", \"blue\", \"cell\", \"wifi\"): for action in", "queue prefix again parts = self.queue_key.split(\":\") source = parts[1] api_key", "for item in data[name]: # validate the blue/cell/wifi specific fields", "= True metrics[api_key][name + \"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] +=", "blues = 
self._parse_list(item, report, self.blue_id, self.blue_map) cells = self._parse_list(item, report,", "shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id, values in queued_obs.items(): # enqueue values", "config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown() class", "queues and if they contain enough or old enough data", "rows: api_keys_known.add(row.valid_key) positions = [] observations = {\"blue\": [], \"cell\":", "key_map[1] is None: report.update(value) else: report[key_map[1]] = value return value", "self.process_report(report) any_data = False for name in (\"blue\", \"cell\", \"wifi\"):", "grouped.items(): for config in export_configs: if config.allowed(api_key, source): queue_key =", "type_ == \"report\": suffix = \"report\" tags = api_tag else:", "if queue.ready(): export_task.delay(config.name, queue_key) if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): _retriable", "parts[1] api_key = parts[2] obj_name = path.format( source=source, api_key=api_key, year=year,", "queue contains the data collected in the web application. 
It", "Report.create(**data) if report is None: return ({}, {}) malformed =", "self.cell_id, self.cell_map) wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map) position =", "select import sqlalchemy.exc from ichnaea.data import _map_content_enabled from ichnaea.models import", "__init__(self, task): self.task = task def __call__(self, export_task): redis_client =", "reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0, ) # log upload_status and", "existing is not None and existing.better(item_obs): continue observations[name][item_key] = item_obs", "= exporter_types.get(config.schema) if exporter_type is not None: exporter_type(task, config, queue_key)()", "None: value[target] = source_value return value def _parse_dict(self, item, report,", "if the names are equal, a simple string can be", "urlparse import uuid import boto3 import boto3.exceptions import botocore.exceptions import", "+= len(obs[name]) any_data = True metrics[api_key][name + \"_drop\"] += malformed_obs.get(name,", "contains the data collected in the web application. 
It is", "(IOError,) _retries = 3 _retry_wait = 1.0 def __init__(self, task,", "task def __call__(self, export_task): redis_client = self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"]", "to be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\" % response.status_code],", "DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter, } exporter_type =", "config in export_configs: if config.allowed(api_key, source): queue_key = config.queue_key(api_key, source)", "len(obs[name]) any_data = True metrics[api_key][name + \"_drop\"] += malformed_obs.get(name, 0)", "source=source, api_key=api_key, year=year, month=month, day=day ) obj_name += uuid.uuid1().hex +", "class InternalTransform(object): \"\"\" This maps the geosubmit v2 schema used", "in queue_items] _, bucketname, path = urlparse(self.config.url)[:3] # s3 key", "keys = [key for key in api_keys if key] if", "_retriable = (IOError, requests.exceptions.RequestException) def send(self, queue_items): # ignore metadata", "for api_key, key_metrics in metrics.items(): api_tag = [] if api_key", "DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard, ) from ichnaea.models.content import", "name) exporter_types = { \"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter,", "Exception: METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] ) raise class InternalTransform(object):", "and lon is not None: grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set)", "CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs =", "not path.endswith(\"/\"): path += \"/\" year, month, day = util.utcnow().timetuple()[:3]", "report = {} self._parse_dict(item, report, self.position_id, self.position_map) blues = self._parse_list(item,", "exporter_types = { \"dummy\": DummyExporter, \"geosubmit\": 
GeosubmitExporter, \"internal\": InternalExporter, \"s3\":", "process_report(self, data): report = Report.create(**data) if report is None: return", "tags = api_tag else: suffix = \"observation\" tags = [\"type:%s\"", "day = util.utcnow().timetuple()[:3] # strip away queue prefix again parts", "0) - gps_age if blues or cells or wifis: return", "(\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs", "positions = [] observations = {\"blue\": [], \"cell\": [], \"wifi\":", "def emit_metrics(self, api_keys_known, metrics): for api_key, key_metrics in metrics.items(): api_tag", "enough data schedules an async export task to process the", "for shard_id, values in shards.items(): queue = self.task.app.data_queues[\"update_datamap_\" + shard_id]", "self.config.url, data=util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=5 ), headers=headers, timeout=60.0, ) #", "the data into the configured export queues, checks those queues", "data. 
It distributes the data into the configured export queues,", "specific report data into one item_obs = obs_cls.combine(report, item_report) item_key", "def send(self, queue_items): raise NotImplementedError() class DummyExporter(ReportExporter): def send(self, queue_items):", "path = urlparse(self.config.url)[:3] # s3 key names start without a", "obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] += len(obs[name]) any_data = True", "= \"report\" tags = api_tag else: suffix = \"observation\" tags", "WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs = defaultdict(list) for obs in", "Report, WifiObservation, WifiReport, WifiShard, ) from ichnaea.models.content import encode_datamap_grid from", "\"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\", ] blue_id", "action = name.split(\"_\") if type_ == \"report\": suffix = \"report\"", "start without a leading slash path = path.lstrip(\"/\") if not", "False for name in (\"blue\", \"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name])", "= obs_cls.combine(report, item_report) item_key = item_obs.unique_key # if we have", "if source_value is not None: value[target] = source_value return value", "WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE) METRICS = markus.get_metrics() class IncomingQueue(object): \"\"\"", "None) position_map = [ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\",", "self.queue.dequeue() if not queue_items: return success = False for i", "application. 
It is the single entrypoint from which all other", "= ExportConfig.all(session) with self.task.redis_pipeline() as pipe: for (api_key, source), items", "values in shards.items(): queue = self.task.app.data_queues[\"update_datamap_\" + shard_id] queue.enqueue(list(values), pipe=pipe)", "value def _parse_list(self, item, report, key_map, field_map): values = []", "we have better data for the same key, ignore existing", "_, bucketname, path = urlparse(self.config.url)[:3] # s3 key names start", "\"wifi\") wifi_map = [ (\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\",", "name, report_cls, obs_cls in ( (\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport,", "_map_dict(self, item_source, field_map): value = {} for spec in field_map:", "prefix again parts = self.queue_key.split(\":\") source = parts[1] api_key =", "= config self.queue_key = queue_key self.queue = config.queue(queue_key, task.redis_client) self.stats_tags", "self.config.name] @staticmethod def export(task, name, queue_key): with task.db_session(commit=False) as session:", "[(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\", \"cell\") cell_map", "\"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"),", "observations): for datatype, shard_model, shard_key, queue_prefix in ( (\"blue\", BlueShard,", "= {} if data.get(name): for item in data[name]: # validate", "exporter_type = exporter_types.get(config.schema) if exporter_type is not None: exporter_type(task, config,", "item.get(key_map[0], ()): value = self._map_dict(value_item, field_map) if value: values.append(value) if", "count: continue type_, action = name.split(\"_\") if type_ == \"report\":", "data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): _retriable = (IOError,) _retries = 3", "import uuid import boto3 import boto3.exceptions import botocore.exceptions 
import markus", "\"blue\": observations[\"blue\"].values(), \"cell\": observations[\"cell\"].values(), \"wifi\": observations[\"wifi\"].values(), } return (obs, malformed)", "cells or wifis: return report return {} class InternalExporter(ReportExporter): _retriable", "if key] if keys: columns = ApiKey.__table__.c rows = session.execute(", "data in each queue. \"\"\" def __init__(self, task): self.task =", "data to be ready for processing. for queue_key in config.partitions(redis_client):", "queues if they now contain enough data or # old", "= [] for value_item in item.get(key_map[0], ()): value = self._map_dict(value_item,", "report, self.wifi_id, self.wifi_map) position = item.get(\"position\") or {} gps_age =", "[ (\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"),", "= False time.sleep(self._retry_wait * (i ** 2 + 1)) if", "section id # *_map maps fields inside the section from", "self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations) if _map_content_enabled and positions: self.process_datamap(pipe,", "% api_key] for name, count in key_metrics.items(): if not count:", "one item_obs = obs_cls.combine(report, item_report) item_key = item_obs.unique_key # if", "path = path.lstrip(\"/\") if not path.endswith(\"/\"): path += \"/\" year,", "# limit database session to get API keys keys =", "session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in rows: api_keys_known.add(row.valid_key) positions =", "= util.utcnow().timetuple()[:3] # strip away queue prefix again parts =", "time for type_ in (\"blue\", \"cell\", \"wifi\"): for record in", "the data collected in the web application. 
It is the", "for queue_id, values in queued_obs.items(): # enqueue values for each", "api_key in api_keys: metrics[api_key] = {} for type_ in (\"report\",", "] blue_id = (\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\", \"mac\"), \"age\",", "and existing.better(item_obs): continue observations[name][item_key] = item_obs obs = { \"blue\":", "import sqlalchemy.exc from ichnaea.data import _map_content_enabled from ichnaea.models import (", "the same key, ignore existing = observations[name].get(item_key) if existing is", "malformed_obs = self.process_report(report) any_data = False for name in (\"blue\",", "METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:failure\"] ) raise class InternalTransform(object): \"\"\"", "value_item in item.get(key_map[0], ()): value = self._map_dict(value_item, field_map) if value:", "metrics): for api_key, key_metrics in metrics.items(): api_tag = [] if", "or # old enough data to be ready for processing.", "group by sharded queue shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id =", "as session: config = ExportConfig.get(session, name) exporter_types = { \"dummy\":", "{} class InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform =", "task, config, queue_key): self.task = task self.config = config self.queue_key", "in view code and external transfers (backup, forward to partners)", "report, self.position_id, self.position_map) blues = self._parse_list(item, report, self.blue_id, self.blue_map) cells", "= set() for lat, lon in positions: if lat is", "queue_key in config.partitions(redis_client): queue = config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name,", "\"signal\")] cell_id = (\"cellTowers\", \"cell\") cell_map = [ (\"radioType\", \"radio\"),", "s3 key names start without a leading slash path =", "section id to a target section id # *_map maps", "export queues, checks those queues 
and if they contain enough", "each queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known,", "+ api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action), count, tags=tags) def process_report(self,", "()): record[\"age\"] = record.get(\"age\", 0) - gps_age if blues or", "timestamp: # turn timestamp into GPS timestamp report[\"timestamp\"] = timestamp", "ignore existing = observations[name].get(item_key) if existing is not None and", "for item in queue_items] _, bucketname, path = urlparse(self.config.url)[:3] #", "(backup, forward to partners) to the internal submit v1 schema", "+ [\"status:%s\" % response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter): _retriable =", "maps fields inside the section from source to target id", "and positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def queue_observations(self, pipe, observations):", "values = [] for value_item in item.get(key_map[0], ()): value =", "month, day = util.utcnow().timetuple()[:3] # strip away queue prefix again", "turn timestamp into GPS timestamp report[\"timestamp\"] = timestamp - gps_age", "InternalExporter(ReportExporter): _retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform() def", "item in queue_items: # preprocess items and extract set of", "= True except self._retriable: success = False time.sleep(self._retry_wait * (i", "queue_prefix in ( (\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\",", "boto3 import boto3.exceptions import botocore.exceptions import markus import redis.exceptions import", "transfers (backup, forward to partners) to the internal submit v1", "data schedules an async export task to process the data", "\"blue\") blue_map = [(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\", \"signal\")] cell_id =", "blue_map = [(\"macAddress\", \"mac\"), \"age\", 
(\"signalStrength\", \"signal\")] cell_id = (\"cellTowers\",", "metrics.items(): api_tag = [] if api_key and api_key in api_keys_known:", "lon)) shards = defaultdict(set) for lat, lon in grids: shards[DataMap.shard_id(lat,", "\"application/json\", \"User-Agent\": \"ichnaea\", } response = requests.post( self.config.url, data=util.encode_gzip( json.dumps({\"items\":", "if success: METRICS.incr(\"data.export.batch\", tags=self.stats_tags) break if success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name,", "= observations[name].get(item_key) if existing is not None and existing.better(item_obs): continue", "ichnaea.models.content import encode_datamap_grid from ichnaea import util WHITESPACE = re.compile(r\"\\s\",", "( ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig,", "a source section id to a target section id #", "try: with METRICS.timer(\"data.export.upload.timing\", tags=self.stats_tags): self.send(queue_items) success = True except self._retriable:", "+ self.config.name] @staticmethod def export(task, name, queue_key): with task.db_session(commit=False) as", "wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map) position = item.get(\"position\") or", "relative to GPS time for type_ in (\"blue\", \"cell\", \"wifi\"):", "if item_report is None: malformed[name] += 1 continue # combine", "bad responses # this causes the task to be re-tried", "pipelines get their data. It distributes the data into the", "if data_queue.ready(): self.task.apply_countdown() class ReportExporter(object): _retriable = (IOError,) _retries =", "from which all other data pipelines get their data. 
It", "= (\"bluetoothBeacons\", \"blue\") blue_map = [(\"macAddress\", \"mac\"), \"age\", (\"signalStrength\", \"signal\")]", "\"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name + \"_upload\"] += len(obs[name]) any_data", "return value def _parse_dict(self, item, report, key_map, field_map): value =", "in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} )", "exporter_types.get(config.schema) if exporter_type is not None: exporter_type(task, config, queue_key)() def", "\"/\" year, month, day = util.utcnow().timetuple()[:3] # strip away queue", "position_map = [ (\"latitude\", \"lat\"), (\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\",", "if gps_age: # Normalize age fields to be relative to", "the geosubmit v2 schema used in view code and external", "GPS time for type_ in (\"blue\", \"cell\", \"wifi\"): for record", "malformed) def process_datamap(self, pipe, positions): grids = set() for lat,", "API keys keys = [key for key in api_keys if", "()): value = self._map_dict(value_item, field_map) if value: values.append(value) if values:", "= [\"type:%s\" % type_] + api_tag METRICS.incr(\"data.%s.%s\" % (suffix, action),", "= item[\"api_key\"] report = item[\"report\"] obs, malformed_obs = self.process_report(report) any_data", "\"heading\", \"pressure\", \"speed\", \"source\", ] blue_id = (\"bluetoothBeacons\", \"blue\") blue_map", "names are equal, a simple string can be specified instead", "\"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ] def", "same key, ignore existing = observations[name].get(item_key) if existing is not", "maps a source section id to a target section id", "= s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\", ContentType=\"application/json\") METRICS.incr( 
\"data.export.upload\",", "(\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\",", "def queue_observations(self, pipe, observations): for datatype, shard_model, shard_key, queue_prefix in", "task.redis_client) self.stats_tags = [\"key:\" + self.config.name] @staticmethod def export(task, name,", "GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException) def send(self, queue_items): # ignore", "not None and existing.better(item_obs): continue observations[name][item_key] = item_obs obs =", "ReportExporter(object): _retriable = (IOError,) _retries = 3 _retry_wait = 1.0", "(\"longitude\", \"lon\"), \"accuracy\", \"altitude\", (\"altitudeAccuracy\", \"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\",", "instead # of a two-tuple position_id = (\"position\", None) position_map", "\"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"),", "for item in data: grouped[(item[\"api_key\"], item.get(\"source\", \"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\":", "success = True except self._retriable: success = False time.sleep(self._retry_wait *", "redis.exceptions.RedisError, sqlalchemy.exc.InternalError) transform = InternalTransform() def send(self, queue_items): api_keys =", "name in (\"blue\", \"cell\", \"wifi\"): if obs.get(name): observations[name].extend(obs[name]) metrics[api_key][name +", "_retriable = ( IOError, boto3.exceptions.Boto3Error, botocore.exceptions.BotoCoreError, ) def send(self, queue_items):", "success and self.queue.ready(): self.task.apply_countdown(args=[self.config.name, self.queue_key]) def send(self, queue_items): raise NotImplementedError()", "queue_items): pass class GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException) def send(self,", 
"(\"timingAdvance\", \"ta\"), ] wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map = [", "= {} observations = {} for name, report_cls, obs_cls in", "\"wifi\": observations[\"wifi\"].values(), } return (obs, malformed) def process_datamap(self, pipe, positions):", "self.stats_tags = [\"key:\" + self.config.name] @staticmethod def export(task, name, queue_key):", "\"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ):", "in api_keys_known: api_tag = [\"key:%s\" % api_key] for name, count", "CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard, ) from ichnaea.models.content", "continue type_, action = name.split(\"_\") if type_ == \"report\": suffix", "queue_key): with task.db_session(commit=False) as session: config = ExportConfig.get(session, name) exporter_types", "malformed[name] += 1 continue # combine general and specific report", "import requests.exceptions from sqlalchemy import select import sqlalchemy.exc from ichnaea.data", "again parts = self.queue_key.split(\":\") source = parts[1] api_key = parts[2]", "self.transform(item[\"report\"]) if item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key in api_keys: metrics[api_key]", "fields inside the section from source to target id #", "api_key in api_keys_known: api_tag = [\"key:%s\" % api_key] for name,", "= (IOError, requests.exceptions.RequestException) def send(self, queue_items): # ignore metadata reports", "# turn timestamp into GPS timestamp report[\"timestamp\"] = timestamp -", "= session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in rows: api_keys_known.add(row.valid_key) positions", "CellObservation), (\"wifi\", WifiReport, WifiObservation), ): malformed[name] = 0 observations[name] =", "positions: self.process_datamap(pipe, positions) self.emit_metrics(api_keys_known, metrics) def queue_observations(self, pipe, observations): 
for", "= (IOError,) _retries = 3 _retry_wait = 1.0 def __init__(self,", "= source_value return value def _parse_dict(self, item, report, key_map, field_map):", "report[\"timestamp\"] = timestamp - gps_age if gps_age: # Normalize age", "return ({}, {}) malformed = {} observations = {} for", "all queues if they now contain enough data or #", "= self.queue_key.split(\":\") source = parts[1] api_key = parts[2] obj_name =", "single entrypoint from which all other data pipelines get their", "(\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport, WifiObservation), ):", "path.lstrip(\"/\") if not path.endswith(\"/\"): path += \"/\" year, month, day", "response.status_code], ) response.raise_for_status() class S3Exporter(ReportExporter): _retriable = ( IOError, boto3.exceptions.Boto3Error,", "to get API keys keys = [key for key in", "\"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter, } exporter_type = exporter_types.get(config.schema)", "from source to target id # if the names are", "= data_queue.dequeue() grouped = defaultdict(list) for item in data: grouped[(item[\"api_key\"],", "our own database models. 
\"\"\" # *_id maps a source", "config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for config in export_configs: # Check", "\"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1 if any_data: positions.append((report[\"lat\"],", "def process_report(self, data): report = Report.create(**data) if report is None:", "path.format( source=source, api_key=api_key, year=year, month=month, day=day ) obj_name += uuid.uuid1().hex", "= path.format( source=source, api_key=api_key, year=year, month=month, day=day ) obj_name +=", "in ( (\"blue\", BlueReport, BlueObservation), (\"cell\", CellReport, CellObservation), (\"wifi\", WifiReport,", "item_report = report_cls.create(**item) if item_report is None: malformed[name] += 1", "pipe, observations): for datatype, shard_model, shard_key, queue_prefix in ( (\"blue\",", "to the internal submit v1 schema used in our own", "= {} self._parse_dict(item, report, self.position_id, self.position_map) blues = self._parse_list(item, report,", "1 if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1 with", "or wifis: return report return {} class InternalExporter(ReportExporter): _retriable =", "slash path = path.lstrip(\"/\") if not path.endswith(\"/\"): path += \"/\"", "record.get(\"age\", 0) - gps_age if blues or cells or wifis:", "shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix + shard_id queued_obs[queue_id].append(obs.to_json()) for queue_id,", "+= 1 if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1", "spec else: source = spec target = spec source_value =", "\"update_cell_\"), (\"wifi\", WifiShard, \"mac\", \"update_wifi_\"), ): queued_obs = defaultdict(list) for", "enough or old enough data schedules an async export task", "(\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id = (\"wifiAccessPoints\", \"wifi\") wifi_map", "as pipe: for 
(api_key, source), items in grouped.items(): for config", "\"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ]", "with self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations) if _map_content_enabled and positions:", "Check all queues if they now contain enough data or", "WifiReport, WifiObservation), ): malformed[name] = 0 observations[name] = {} if", "= [ (\"macAddress\", \"mac\"), \"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\",", "if values: report[key_map[1]] = values return values def __call__(self, item):", "(\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ] def _map_dict(self, item_source,", "+= \"/\" year, month, day = util.utcnow().timetuple()[:3] # strip away", "data pipelines get their data. It distributes the data into", "with task.db_session(commit=False) as session: config = ExportConfig.get(session, name) exporter_types =", "[] for item in queue_items: # preprocess items and extract", "from ichnaea.data import _map_content_enabled from ichnaea.models import ( ApiKey, BlueObservation,", "import redis.exceptions import requests import requests.exceptions from sqlalchemy import select", "existing.better(item_obs): continue observations[name][item_key] = item_obs obs = { \"blue\": observations[\"blue\"].values(),", "item[\"api_key\"], \"report\": item[\"report\"]} ) with self.task.db_session(commit=False) as session: export_configs =", "\"\"\" This maps the geosubmit v2 schema used in view", "= \"observation\" tags = [\"type:%s\" % type_] + api_tag METRICS.incr(\"data.%s.%s\"", "= [] observations = {\"blue\": [], \"cell\": [], \"wifi\": []}", "queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known, metrics): for", "lat, lon in positions: if lat is not None and", "= {} items = [] for item in queue_items: 
#", "METRICS.incr(\"data.%s.%s\" % (suffix, action), count, tags=tags) def process_report(self, data): report", "in queue_items] headers = { \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\", \"User-Agent\":", "+= malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"] += 1 if any_data: positions.append((report[\"lat\"], report[\"lon\"]))", "queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe) def emit_metrics(self, api_keys_known, metrics):", "cell_id = (\"cellTowers\", \"cell\") cell_map = [ (\"radioType\", \"radio\"), (\"mobileCountryCode\",", "sqlalchemy.exc.InternalError) transform = InternalTransform() def send(self, queue_items): api_keys = set()", "{} items = [] for item in queue_items: # preprocess", "exception for bad responses # this causes the task to", "= 0 observations[name] = {} if data.get(name): for item in", ").fetchall() for row in rows: api_keys_known.add(row.valid_key) positions = [] observations", "from urllib.parse import urlparse import uuid import boto3 import boto3.exceptions", "not None: grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set) for lat, lon", "_map_content_enabled from ichnaea.models import ( ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation,", "import encode_datamap_grid from ichnaea import util WHITESPACE = re.compile(r\"\\s\", flags=re.UNICODE)", "\"report\": suffix = \"report\" tags = api_tag else: suffix =", "for spec in field_map: if isinstance(spec, tuple): source, target =", "= {} for spec in field_map: if isinstance(spec, tuple): source,", "*_map maps fields inside the section from source to target", "as session: # limit database session to get API keys", "api_key=api_key, year=year, month=month, day=day ) obj_name += uuid.uuid1().hex + \".json.gz\"", "\"altitude_accuracy\"), \"heading\", \"pressure\", \"speed\", \"source\", ] blue_id = (\"bluetoothBeacons\", \"blue\")", "their data. 
It distributes the data into the configured export", "\"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\", (\"primaryScramblingCode\", \"psc\"), \"serving\",", "redis.exceptions import requests import requests.exceptions from sqlalchemy import select import", "GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter, } exporter_type = exporter_types.get(config.schema) if", "select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in rows: api_keys_known.add(row.valid_key) positions = []", "if keys: columns = ApiKey.__table__.c rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall()", "type_, action = name.split(\"_\") if type_ == \"report\": suffix =", "metrics[api_key][\"report_upload\"] += 1 if any_data: positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] +=", "code and external transfers (backup, forward to partners) to the", "= self.process_report(report) any_data = False for name in (\"blue\", \"cell\",", "from ichnaea.models.content import encode_datamap_grid from ichnaea import util WHITESPACE =", "= urlparse(self.config.url)[:3] # s3 key names start without a leading", "_parse_list(self, item, report, key_map, field_map): values = [] for value_item", "markus.get_metrics() class IncomingQueue(object): \"\"\" The incoming queue contains the data", "lon in positions: if lat is not None and lon", "source): queue_key = config.queue_key(api_key, source) queue = config.queue(queue_key, redis_client) queue.enqueue(items,", "target = spec source_value = item_source.get(source) if source_value is not", "= defaultdict(set) for lat, lon in grids: shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))", "month=month, day=day ) obj_name += uuid.uuid1().hex + \".json.gz\" try: data", "be ready for processing. 
for queue_key in config.partitions(redis_client): queue =", "pipe, positions): grids = set() for lat, lon in positions:", "in items: api_key = item[\"api_key\"] report = item[\"report\"] obs, malformed_obs", "1 continue # combine general and specific report data into", "export_configs = ExportConfig.all(session) with self.task.redis_pipeline() as pipe: for (api_key, source),", "= [] for item in queue_items: # preprocess items and", "# strip away queue prefix again parts = self.queue_key.split(\":\") source", "item_source: value = self._map_dict(item_source, field_map) if value: if key_map[1] is", "record in report.get(type_, ()): record[\"age\"] = record.get(\"age\", 0) - gps_age", "observations = {} for name, report_cls, obs_cls in ( (\"blue\",", "@staticmethod def export(task, name, queue_key): with task.db_session(commit=False) as session: config", "InternalTransform(object): \"\"\" This maps the geosubmit v2 schema used in", "= {} for name, report_cls, obs_cls in ( (\"blue\", BlueReport,", "old enough data to be ready for processing. 
for queue_key", "report.get(type_, ()): record[\"age\"] = record.get(\"age\", 0) - gps_age if blues", "item_obs.unique_key # if we have better data for the same", "BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"), (\"wifi\", WifiShard, \"mac\",", "defaultdict(list) for obs in observations[datatype]: # group by sharded queue", "(\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"), (\"cellId\", \"cid\"), \"age\", \"asu\",", "(\"report\", \"blue\", \"cell\", \"wifi\"): for action in (\"drop\", \"upload\"): metrics[api_key][\"%s_%s\"", "for type_ in (\"blue\", \"cell\", \"wifi\"): for record in report.get(type_,", "= [ (\"radioType\", \"radio\"), (\"mobileCountryCode\", \"mcc\"), (\"mobileNetworkCode\", \"mnc\"), (\"locationAreaCode\", \"lac\"),", "tags = [\"type:%s\" % type_] + api_tag METRICS.incr(\"data.%s.%s\" % (suffix,", "= self._map_dict(value_item, field_map) if value: values.append(value) if values: report[key_map[1]] =", "in export_configs: # Check all queues if they now contain", "in export_configs: if config.allowed(api_key, source): queue_key = config.queue_key(api_key, source) queue", "= self.task.redis_client data_queue = self.task.app.data_queues[\"update_incoming\"] data = data_queue.dequeue() grouped =", "queue_observations(self, pipe, observations): for datatype, shard_model, shard_key, queue_prefix in (", "collected in the web application. 
It is the single entrypoint", "[] if api_key and api_key in api_keys_known: api_tag = [\"key:%s\"", "action)] = 0 with self.task.db_session(commit=False) as session: # limit database", "item in queue_items] headers = { \"Content-Encoding\": \"gzip\", \"Content-Type\": \"application/json\",", "{} gps_age = position.get(\"age\", 0) timestamp = item.get(\"timestamp\") if timestamp:", "be re-tried METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\" % response.status_code], )", "uuid.uuid1().hex + \".json.gz\" try: data = util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7", "requests.exceptions.RequestException) def send(self, queue_items): # ignore metadata reports = [item[\"report\"]", "config.partitions(redis_client): queue = config.queue(queue_key, redis_client) if queue.ready(): export_task.delay(config.name, queue_key) if", "[], \"cell\": [], \"wifi\": []} for item in items: api_key", "in ( (\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\", CellShard, \"cellid\", \"update_cell_\"),", "data = util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3 = boto3.resource(\"s3\")", "= (\"wifiAccessPoints\", \"wifi\") wifi_map = [ (\"macAddress\", \"mac\"), \"age\", \"channel\",", "timestamp report[\"timestamp\"] = timestamp - gps_age if gps_age: # Normalize", ") # log upload_status and trigger exception for bad responses", "queue shard_id = shard_model.shard_id(getattr(obs, shard_key)) queue_id = queue_prefix + shard_id", "def send(self, queue_items): pass class GeosubmitExporter(ReportExporter): _retriable = (IOError, requests.exceptions.RequestException)", "positions): grids = set() for lat, lon in positions: if", "= { \"dummy\": DummyExporter, \"geosubmit\": GeosubmitExporter, \"internal\": InternalExporter, \"s3\": S3Exporter,", "contain enough data or # old enough data to be", "= util.encode_gzip( json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3 = 
boto3.resource(\"s3\") bucket", "= values return values def __call__(self, item): report = {}", "config.queue_key(api_key, source) queue = config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for config", "shard_model, shard_key, queue_prefix in ( (\"blue\", BlueShard, \"mac\", \"update_blue_\"), (\"cell\",", "positions.append((report[\"lat\"], report[\"lon\"])) else: metrics[api_key][\"report_drop\"] += 1 with self.task.redis_pipeline() as pipe:", "None: report.update(value) else: report[key_map[1]] = value return value def _parse_list(self,", "json.dumps({\"items\": reports}).encode(), compresslevel=7 ) s3 = boto3.resource(\"s3\") bucket = s3.Bucket(bucketname)", "tags=self.stats_tags): self.send(queue_items) success = True except self._retriable: success = False", "better data for the same key, ignore existing = observations[name].get(item_key)", "queue_key): self.task = task self.config = config self.queue_key = queue_key", "+= 1 with self.task.redis_pipeline() as pipe: self.queue_observations(pipe, observations) if _map_content_enabled", "= set() api_keys_known = set() metrics = {} items =", "get API keys keys = [key for key in api_keys", "async export task to process the data in each queue.", "= boto3.resource(\"s3\") bucket = s3.Bucket(bucketname) obj = bucket.Object(obj_name) obj.put(Body=data, ContentEncoding=\"gzip\",", "in our own database models. 
\"\"\" # *_id maps a", "# *_id maps a source section id to a target", "or {} gps_age = position.get(\"age\", 0) timestamp = item.get(\"timestamp\") if", "self._parse_list(item, report, self.cell_id, self.cell_map) wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)", "} exporter_type = exporter_types.get(config.schema) if exporter_type is not None: exporter_type(task,", "import boto3.exceptions import botocore.exceptions import markus import redis.exceptions import requests", "self._parse_list(item, report, self.blue_id, self.blue_map) cells = self._parse_list(item, report, self.cell_id, self.cell_map)", "[key for key in api_keys if key] if keys: columns", "any_data = True metrics[api_key][name + \"_drop\"] += malformed_obs.get(name, 0) metrics[api_key][\"report_upload\"]", "item[\"report\"]: items.append(item) api_keys.add(item[\"api_key\"]) for api_key in api_keys: metrics[api_key] = {}", "self._parse_list(item, report, self.wifi_id, self.wifi_map) position = item.get(\"position\") or {} gps_age", "items: api_key = item[\"api_key\"] report = item[\"report\"] obs, malformed_obs =", "def _parse_list(self, item, report, key_map, field_map): values = [] for", "requests.exceptions from sqlalchemy import select import sqlalchemy.exc from ichnaea.data import", "all other data pipelines get their data. 
It distributes the", "\"psc\"), \"serving\", (\"signalStrength\", \"signal\"), (\"timingAdvance\", \"ta\"), ] wifi_id = (\"wifiAccessPoints\",", "for the same key, ignore existing = observations[name].get(item_key) if existing", "\"gnss\"))].append( {\"api_key\": item[\"api_key\"], \"report\": item[\"report\"]} ) with self.task.db_session(commit=False) as session:", "self.position_map) blues = self._parse_list(item, report, self.blue_id, self.blue_map) cells = self._parse_list(item,", "is None: return ({}, {}) malformed = {} observations =", "self.position_id, self.position_map) blues = self._parse_list(item, report, self.blue_id, self.blue_map) cells =", "other data pipelines get their data. It distributes the data", "\"age\", \"channel\", \"frequency\", (\"radioType\", \"radio\"), (\"signalToNoiseRatio\", \"snr\"), (\"signalStrength\", \"signal\"), ]", "grids.add(DataMap.scale(lat, lon)) shards = defaultdict(set) for lat, lon in grids:", "__call__(self, item): report = {} self._parse_dict(item, report, self.position_id, self.position_map) blues", "is not None: exporter_type(task, config, queue_key)() def __call__(self): queue_items =", "rows = session.execute( select([columns.valid_key]).where(columns.valid_key.in_(keys)) ).fetchall() for row in rows: api_keys_known.add(row.valid_key)", "enqueue values for each queue queue = self.task.app.data_queues[queue_id] queue.enqueue(values, pipe=pipe)", "section from source to target id # if the names", "they contain enough or old enough data schedules an async", "self.blue_map) cells = self._parse_list(item, report, self.cell_id, self.cell_map) wifis = self._parse_list(item,", "process_datamap(self, pipe, positions): grids = set() for lat, lon in", "METRICS.incr( \"data.export.upload\", tags=self.stats_tags + [\"status:%s\" % response.status_code], ) response.raise_for_status() class", "lat is not None and lon is not None: grids.add(DataMap.scale(lat,", "in report.get(type_, ()): record[\"age\"] = record.get(\"age\", 0) 
- gps_age if", "import requests import requests.exceptions from sqlalchemy import select import sqlalchemy.exc", "= config.queue(queue_key, redis_client) queue.enqueue(items, pipe=pipe) for config in export_configs: #", "value def _parse_dict(self, item, report, key_map, field_map): value = {}" ]
[ "= 16384 num_batches = 1 data_source = \"./dcn_data/file_list_test.txt\" inference_params =", "= hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff =", "False, scaler = 1.0, use_cuda_graph = True, metrics_spec = {hugectr.MetricsType.AUC:", "[hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300,", "True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches = num_batches,", "hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"], top_names = [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type =", "max_eval_batches = 1, batchsize_eval = 16384, batchsize = 16384, lr", "= \"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names =", "use_mixed_precision = False, use_cuda_graph = True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params)", "\"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference import", "= hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type =", "between DCN multi hot inference and training: {}\".format(mse)) sys.exit(1) else:", "inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches = num_batches, source", "16, combiner = \"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name = \"data1\",", "as np batch_size = 16384 num_batches = 1 data_source =", "False, cache_size_percentage = 1.0, i64_input_key = False, use_mixed_precision = False,", "raise RuntimeError(\"Too large mse between DCN multi hot 
inference and", "MPI solver = hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches = 1, batchsize_eval", "= [\"fc1\"], top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names =", "= True, use_mixed_precision = False, scaler = 1.0, use_cuda_graph =", "+ str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference import InferenceParams, CreateInferenceSession", "0.9, beta2 = 0.999, epsilon = 0.0001) model = hugectr.Model(solver,", "hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", check_type", "dense_dim = 13, dense_name = \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2,", "ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names = [\"multicross1\"],", "[\"fc3\", \"label\"], top_names = [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\")", "= 1, batchsize_eval = 16384, batchsize = 16384, lr =", "num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"], top_names =", "1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0,", "optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names", "= \"label\", dense_dim = 13, dense_name = \"dense\", data_reader_sparse_param_array =", "sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache = False, cache_size_percentage", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names = [\"slice11\", \"slice12\"],", "bottom_names = [\"concat1\"], 
top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type =", "[\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names = [\"dropout1\"],", "batchsize = 16384, lr = 0.001, vvgpu = [[0]], repeat_dataset", "\"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"],", "\"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name = \"data1\", optimizer = optimizer))", "bottom_names = [\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416))", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names = [\"fc2\"], num_output=1024))", "\"sparse_embedding1\", bottom_name = \"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,", "hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type =", "False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size =", "top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names", "[\"slice11\"], top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names =", "= \"dcn\", max_batchsize = batch_size, hit_rate_threshold = 1.0, dense_model_file =", "model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference import 
InferenceParams,", "InferenceParams(model_name = \"dcn\", max_batchsize = batch_size, hit_rate_threshold = 1.0, dense_model_file", "source = [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum, num_workers", "bottom_names = [\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names", "= 2300, display = 200, eval_interval = 2000, snapshot =", "hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim = 1, label_name = \"label\", dense_dim", "= [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names =", "\"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names = [\"concat1\"],", "print(\"DCN multi hot inference results are consistent with those during", "RuntimeError(\"Too large mse between DCN multi hot inference and training:", "\"multicross1\"], top_names = [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"],", "1 data_source = \"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name = \"dcn\", max_batchsize", "training: {}\".format(mse)) sys.exit(1) else: print(\"DCN multi hot inference results are", "= hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5))", "= [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"],", "16384, batchsize = 16384, lr = 0.001, vvgpu = [[0]],", "= \"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum, num_workers = 16) optimizer =", 
"hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type =", "import numpy as np batch_size = 16384 num_batches = 1", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"], top_names = [\"concat1\"]))", "\"label\"], top_names = [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter", "use_mixed_precision = False, scaler = 1.0, use_cuda_graph = True, metrics_spec", "solver = hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches = 1, batchsize_eval =", "top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"],", "hugectr.Check_t.Sum, num_workers = 16) optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type", "workspace_size_per_gpu_in_mb = 300, embedding_vec_size = 16, combiner = \"sum\", sparse_embedding_name", "= 2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" +", "top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"dropout2\",", "2300, display = 200, eval_interval = 2000, snapshot = 2000,", "batch_size, hit_rate_threshold = 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"],", "26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size = 16,", "= \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type =", "[\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", 
check_type = hugectr.Check_t.Sum, num_workers = 16)", "multi hot inference results are consistent with those during training,", "dense_name = \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type", "= [\"concat1\"], top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,", "max_batchsize = batch_size, hit_rate_threshold = 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files", "= 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id =", "{}\".format(mse)) sys.exit(1) else: print(\"DCN multi hot inference results are consistent", "inference_params = InferenceParams(model_name = \"dcn\", max_batchsize = batch_size, hit_rate_threshold =", "sys.exit(1) else: print(\"DCN multi hot inference results are consistent with", "= [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names", "bottom_names = [\"dropout2\", \"multicross1\"], top_names = [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,", "check_type = hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse", "[\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"], top_names", "top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\",", "snapshot = 2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\"", "dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = 
[\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache", "mse between DCN multi hot inference and training: {}\".format(mse)) sys.exit(1)", "= [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter = 2300,", "hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches = 1, batchsize_eval = 16384, batchsize", "= hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", check_type =", "bottom_names = [\"concat2\"], top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,", "[\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"],", "= inference_session.predict(num_batches = num_batches, source = data_source, data_reader_type = hugectr.DataReaderType_t.Norm,", "model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot", "update_type = hugectr.Update_t.Global, beta1 = 0.9, beta2 = 0.999, epsilon", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names = [\"multicross1\"], num_layers=1))", "str(2000)) from hugectr.inference import InferenceParams, CreateInferenceSession import numpy as np", "[\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names =", "= [\"dropout2\", \"multicross1\"], top_names = [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names", "= 16, combiner = \"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name =", "= [\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache = False, cache_size_percentage =", "= hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"], top_names = [\"concat2\"])) 
model.add(hugectr.DenseLayer(layer_type", "bottom_name = \"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names", "data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb", "= 1.0, use_cuda_graph = True, metrics_spec = {hugectr.MetricsType.AUC: 1.0}) reader", "\"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,", "= 16384, batchsize = 16384, lr = 0.001, vvgpu =", "import hugectr from mpi4py import MPI solver = hugectr.CreateSolver(model_name =", "= optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names =", "hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size = 16, combiner = \"sum\",", "hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth", "= hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)]))", "= [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names", "use_cuda_graph = True, metrics_spec = {hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type", "else: print(\"DCN multi hot inference results are consistent with those", "16384, lr = 0.001, vvgpu = [[0]], repeat_dataset = True,", "hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type =", "and training: {}\".format(mse)) 
sys.exit(1) else: print(\"DCN multi hot inference results", "= 0.999, epsilon = 0.0001) model = hugectr.Model(solver, reader, optimizer)", "= num_batches, source = data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type =", "model.add(hugectr.Input(label_dim = 1, label_name = \"label\", dense_dim = 13, dense_name", "use_cuda_graph = True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches", "hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum,", "= [\"dropout1\"], top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names", "\"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference import InferenceParams, CreateInferenceSession import numpy", "[\"dropout1\"], top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names =", "[\"reshape1\", \"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names =", "0.001, vvgpu = [[0]], repeat_dataset = True, use_mixed_precision = False,", "= CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches = num_batches, source =", "hugectr.Update_t.Global, beta1 = 0.9, beta2 = 0.999, epsilon = 0.0001)", "mse > 1e-3: raise RuntimeError(\"Too large mse between DCN multi", "source = data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth", "= hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type", "hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,", "= 0.9, 
beta2 = 0.999, epsilon = 0.0001) model =", "= [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb =", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5))", "embedding_vec_size = 16, combiner = \"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name", "16384 num_batches = 1 data_source = \"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name", "diff = predictions-grount_truth mse = np.mean(diff*diff) if mse > 1e-3:", "= 1, label_name = \"label\", dense_dim = 13, dense_name =", "= np.mean(diff*diff) if mse > 1e-3: raise RuntimeError(\"Too large mse", "large mse between DCN multi hot inference and training: {}\".format(mse))", "= hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1 = 0.9, beta2 =", "\"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache = False,", "= hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\",", "top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"reshape1\",", "hot inference and training: {}\".format(mse)) sys.exit(1) else: print(\"DCN multi hot", "[\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names = [\"slice11\",", "300, embedding_vec_size = 16, combiner = \"sum\", sparse_embedding_name = \"sparse_embedding1\",", "hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse = np.mean(diff*diff)", "model.summary() model.graph_to_json(graph_config_file = 
\"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display = 200,", "top_names = [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter =", "bottom_names = [\"slice12\"], top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,", "[\"slice12\"], top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names =", "model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display =", "use_gpu_embedding_cache = False, cache_size_percentage = 1.0, i64_input_key = False, use_mixed_precision", "= hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type", "top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"],", "= hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type", "= [\"./dcn_data/file_list.txt\"], eval_source = \"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum, num_workers =", "= hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches = 1, batchsize_eval = 16384,", "reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source =", "= [\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names =", "= [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names =", "np batch_size = 16384 num_batches = 1 data_source = \"./dcn_data/file_list_test.txt\"", "= 16) optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type = 
hugectr.Update_t.Global,", "scaler = 1.0, use_cuda_graph = True, metrics_spec = {hugectr.MetricsType.AUC: 1.0})", "0.0001) model = hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim = 1, label_name", "hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"], top_names = [\"loss\"])) model.compile() model.summary()", "= False, cache_size_percentage = 1.0, i64_input_key = False, use_mixed_precision =", "num_batches = 1 data_source = \"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name =", "= \"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name = \"data1\", optimizer =", "[\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"], top_names", "data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\")", "> 1e-3: raise RuntimeError(\"Too large mse between DCN multi hot", "= hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type =", "[\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names =", "= [\"fc3\", \"label\"], top_names = [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file =", "2000, snapshot = 2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000),", "= hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size = 16, combiner =", "= True, metrics_spec = {hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type =", "np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse = np.mean(diff*diff) if mse >", "[\"dropout2\", \"multicross1\"], top_names = [\"concat2\"])) 
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names =", "leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"], top_names =", "2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" + str(2000))", "= 0.0001) model = hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim = 1,", "hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type =", "[[0]], repeat_dataset = True, use_mixed_precision = False, scaler = 1.0,", "2, False, 26)])) model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size", "DCN multi hot inference and training: {}\".format(mse)) sys.exit(1) else: print(\"DCN", "= 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix =", "= [\"sparse_embedding1\"], top_names = [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names", "hugectr from mpi4py import MPI solver = hugectr.CreateSolver(model_name = \"dcn\",", "= \"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name = \"dcn\", max_batchsize = batch_size,", "top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"],", "1e-3: raise RuntimeError(\"Too large mse between DCN multi hot inference", "0, use_gpu_embedding_cache = False, cache_size_percentage = 1.0, i64_input_key = False,", "= \"sparse_embedding1\", bottom_name = \"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type =", "bottom_names = [\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,", 
"model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type", "= 2000, snapshot = 2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" +", "\"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display = 200, eval_interval = 2000,", "[\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names = [\"fc3\"],", "[\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names =", "= hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type", "inference_params) predictions = inference_session.predict(num_batches = num_batches, source = data_source, data_reader_type", "from mpi4py import MPI solver = hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches", "= [\"reshape1\"], leading_dim=416)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"],", "hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1 = 0.9, beta2", "200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = \"/dump_infer/dcn\")", "import InferenceParams, CreateInferenceSession import numpy as np batch_size = 16384", "= [\"slice11\"], top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names", "[\"fc1\"], top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"],", "reader, optimizer) model.add(hugectr.Input(label_dim = 1, label_name = \"label\", dense_dim =", "True, use_mixed_precision = False, scaler = 1.0, use_cuda_graph = True,", "0.999, epsilon = 0.0001) model = hugectr.Model(solver, 
reader, optimizer) model.add(hugectr.Input(label_dim", "= \"dcn\", max_eval_batches = 1, batchsize_eval = 16384, batchsize =", "= 16384, lr = 0.001, vvgpu = [[0]], repeat_dataset =", "num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names = [\"relu2\"]))", "eval_source = \"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum, num_workers = 16) optimizer", "bottom_names = [\"fc3\", \"label\"], top_names = [\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file", "from hugectr.inference import InferenceParams, CreateInferenceSession import numpy as np batch_size", "[\"concat2\"], top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names =", "CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches = num_batches, source = data_source,", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names = [\"fc1\"], num_output=1024))", "top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"],", "optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1 =", "= hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1 = 0.9,", "\"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names =", "model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display = 200, eval_interval", "= hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type", "1.0, use_cuda_graph = True, metrics_spec = 
{hugectr.MetricsType.AUC: 1.0}) reader =", "batchsize_eval = 16384, batchsize = 16384, lr = 0.001, vvgpu", "False, use_cuda_graph = True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions =", "1, label_name = \"label\", dense_dim = 13, dense_name = \"dense\",", "hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type =", "= predictions-grount_truth mse = np.mean(diff*diff) if mse > 1e-3: raise", "\"dcn\", max_eval_batches = 1, batchsize_eval = 16384, batchsize = 16384,", "\"dcn\", max_batchsize = batch_size, hit_rate_threshold = 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\",", "= [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"],", "False, use_mixed_precision = False, use_cuda_graph = True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\",", "num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names = [\"relu1\"]))", "str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference import InferenceParams, CreateInferenceSession import", "True, metrics_spec = {hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,", "= hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim = 1, label_name = \"label\",", "1.0, i64_input_key = False, use_mixed_precision = False, use_cuda_graph = True)", "hit_rate_threshold = 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id", "device_id = 0, use_gpu_embedding_cache = False, cache_size_percentage = 1.0, i64_input_key", "= [\"concat2\"], top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = 
hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"], top_names = [\"loss\"]))", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type", "= hugectr.Update_t.Global, beta1 = 0.9, beta2 = 0.999, epsilon =", "predictions-grount_truth mse = np.mean(diff*diff) if mse > 1e-3: raise RuntimeError(\"Too", "= [\"reshape1\", \"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names", "bottom_names = [\"reshape1\", \"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,", "dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names = [\"fc2\"],", "= hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type", "= 13, dense_name = \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False,", "hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type =", "cache_size_percentage = 1.0, i64_input_key = False, use_mixed_precision = False, use_cuda_graph", "batch_size = 16384 num_batches = 1 data_source = \"./dcn_data/file_list_test.txt\" inference_params", "= 1.0, i64_input_key = False, use_mixed_precision = False, use_cuda_graph =", "i64_input_key = False, use_mixed_precision = False, use_cuda_graph = True) inference_session", "mse = np.mean(diff*diff) if mse > 1e-3: raise RuntimeError(\"Too large", "16) optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1", "mpi4py import MPI solver = hugectr.CreateSolver(model_name = 
\"dcn\", max_eval_batches =", "top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names", "= [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names =", "multi hot inference and training: {}\".format(mse)) sys.exit(1) else: print(\"DCN multi", "vvgpu = [[0]], repeat_dataset = True, use_mixed_precision = False, scaler", "= False, use_mixed_precision = False, use_cuda_graph = True) inference_session =", "[\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"dropout1\"], top_names =", "= hugectr.Layer_t.Concat, bottom_names = [\"reshape1\", \"dense\"], top_names = [\"concat1\"])) model.add(hugectr.DenseLayer(layer_type", "[\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache = False, cache_size_percentage = 1.0,", "optimizer)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape, bottom_names = [\"sparse_embedding1\"], top_names = [\"reshape1\"],", "top_names = [\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names", "= [\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names", "combiner = \"sum\", sparse_embedding_name = \"sparse_embedding1\", bottom_name = \"data1\", optimizer", "\"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name = \"dcn\", max_batchsize = batch_size, hit_rate_threshold", "= [\"slice12\"], top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names", "[\"relu2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu2\"], top_names = [\"dropout2\"],", "= 1 data_source = \"./dcn_data/file_list_test.txt\" inference_params = 
InferenceParams(model_name = \"dcn\",", "= \"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display = 200, eval_interval =", "[\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names =", "hugectr.Layer_t.Slice, bottom_names = [\"concat1\"], top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type", "1, batchsize_eval = 16384, batchsize = 16384, lr = 0.001,", "= 300, embedding_vec_size = 16, combiner = \"sum\", sparse_embedding_name =", "[\"concat1\"], top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names", "= InferenceParams(model_name = \"dcn\", max_batchsize = batch_size, hit_rate_threshold = 1.0,", "beta1 = 0.9, beta2 = 0.999, epsilon = 0.0001) model", "data_reader_type = hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff", "[\"loss\"])) model.compile() model.summary() model.graph_to_json(graph_config_file = \"/dump_infer/dcn.json\") model.fit(max_iter = 2300, display", "= data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum) grount_truth =", "{hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"],", "13, dense_name = \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\", 2, False, 26)]))", "= hugectr.Check_t.Sum, num_workers = 16) optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,", "beta2 = 0.999, epsilon = 0.0001) model = hugectr.Model(solver, reader,", "= \"/dump_infer/dcn_dense_2000.model\", sparse_model_files = [\"/dump_infer/dcn0_sparse_2000.model\"], device_id = 0, use_gpu_embedding_cache =", "bottom_names = [\"fc1\"], top_names = [\"relu1\"])) 
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names", "= batch_size, hit_rate_threshold = 1.0, dense_model_file = \"/dump_infer/dcn_dense_2000.model\", sparse_model_files =", "= 0.001, vvgpu = [[0]], repeat_dataset = True, use_mixed_precision =", "= hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"], top_names = [\"loss\"])) model.compile()", "optimizer) model.add(hugectr.Input(label_dim = 1, label_name = \"label\", dense_dim = 13,", "= {hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source =", "num_batches, source = data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type = hugectr.Check_t.Sum)", "= 0, use_gpu_embedding_cache = False, cache_size_percentage = 1.0, i64_input_key =", "hugectr.Optimizer_t.Adam, update_type = hugectr.Update_t.Global, beta1 = 0.9, beta2 = 0.999,", "label_name = \"label\", dense_dim = 13, dense_name = \"dense\", data_reader_sparse_param_array", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names = [\"fc3\"], num_output=1))", "= [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout, bottom_names = [\"relu1\"], top_names =", "dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"], top_names =", "hot inference results are consistent with those during training, mse:", "[\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"], top_names", "bottom_names = [\"dropout1\"], top_names = [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,", "= False, use_cuda_graph = True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions", "CreateInferenceSession import numpy as np batch_size = 
16384 num_batches =", "= False, scaler = 1.0, use_cuda_graph = True, metrics_spec =", "= [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss, bottom_names = [\"fc3\", \"label\"],", "eval_interval = 2000, snapshot = 2000, snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\"", "<filename>test/inference_correctness/dcn_multi_hot.py import hugectr from mpi4py import MPI solver = hugectr.CreateSolver(model_name", "= hugectr.Check_t.Sum) grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse =", "inference_session.predict(num_batches = num_batches, source = data_source, data_reader_type = hugectr.DataReaderType_t.Norm, check_type", "sparse_embedding_name = \"sparse_embedding1\", bottom_name = \"data1\", optimizer = optimizer)) model.add(hugectr.DenseLayer(layer_type", "hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names = [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type =", "hugectr.inference import InferenceParams, CreateInferenceSession import numpy as np batch_size =", "InferenceParams, CreateInferenceSession import numpy as np batch_size = 16384 num_batches", "num_workers = 16) optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam, update_type =", "if mse > 1e-3: raise RuntimeError(\"Too large mse between DCN", "[\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names = [\"slice11\"], top_names", "[\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names =", "snapshot_prefix = \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from", "= [\"relu1\"], top_names = [\"dropout1\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = 
hugectr.Layer_t.InnerProduct, bottom_names", "import MPI solver = hugectr.CreateSolver(model_name = \"dcn\", max_eval_batches = 1,", "bottom_names = [\"relu2\"], top_names = [\"dropout2\"], dropout_rate=0.5)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,", "+ str(2000)) from hugectr.inference import InferenceParams, CreateInferenceSession import numpy as", "lr = 0.001, vvgpu = [[0]], repeat_dataset = True, use_mixed_precision", "model = hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim = 1, label_name =", "= [\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names", "\"label\", dense_dim = 13, dense_name = \"dense\", data_reader_sparse_param_array = [hugectr.DataReaderSparseParam(\"data1\",", "repeat_dataset = True, use_mixed_precision = False, scaler = 1.0, use_cuda_graph", "[\"fc1\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names =", "numpy as np batch_size = 16384 num_batches = 1 data_source", "display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix", "inference and training: {}\".format(mse)) sys.exit(1) else: print(\"DCN multi hot inference", "top_names = [\"concat2\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names", "= [[0]], repeat_dataset = True, use_mixed_precision = False, scaler =", "1.0}) reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source = [\"./dcn_data/file_list.txt\"], eval_source", "epsilon = 0.0001) model = hugectr.Model(solver, reader, optimizer) model.add(hugectr.Input(label_dim =", "= \"/dump_infer/dcn\") model.export_predictions(\"/dump_infer/dcn_pred_\" + str(2000), \"/dump_infer/dcn_label_\" + str(2000)) from hugectr.inference", "= np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse = np.mean(diff*diff) if mse", 
"hugectr.Layer_t.InnerProduct, bottom_names = [\"concat2\"], top_names = [\"fc3\"], num_output=1)) model.add(hugectr.DenseLayer(layer_type =", "np.mean(diff*diff) if mse > 1e-3: raise RuntimeError(\"Too large mse between", "= [\"fc2\"], num_output=1024)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU, bottom_names = [\"fc2\"], top_names", "data_source = \"./dcn_data/file_list_test.txt\" inference_params = InferenceParams(model_name = \"dcn\", max_batchsize =", "model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat, bottom_names = [\"dropout2\", \"multicross1\"], top_names = [\"concat2\"]))", "\"./dcn_data/file_list_test.txt\", check_type = hugectr.Check_t.Sum, num_workers = 16) optimizer = hugectr.CreateOptimizer(optimizer_type", "metrics_spec = {hugectr.MetricsType.AUC: 1.0}) reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm, source", "predictions = inference_session.predict(num_batches = num_batches, source = data_source, data_reader_type =", "bottom_names = [\"slice11\"], top_names = [\"multicross1\"], num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,", "hugectr.Layer_t.ReLU, bottom_names = [\"fc1\"], top_names = [\"relu1\"])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,", "top_names = [\"slice11\", \"slice12\"], ranges=[(0,429),(0,429)])) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross, bottom_names =", "check_type = hugectr.Check_t.Sum, num_workers = 16) optimizer = hugectr.CreateOptimizer(optimizer_type =", "= True) inference_session = CreateInferenceSession(\"/dump_infer/dcn.json\", inference_params) predictions = inference_session.predict(num_batches =", "num_layers=1)) model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct, bottom_names = [\"slice12\"], top_names = [\"fc1\"],", "grount_truth = np.loadtxt(\"/dump_infer/dcn_pred_2000\") diff = predictions-grount_truth mse = np.mean(diff*diff) if", 
"inference results are consistent with those during training, mse: {}\".format(mse))", "model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, workspace_size_per_gpu_in_mb = 300, embedding_vec_size = 16, combiner" ]
[ "Ci to F, to put each point cloud in a", "X_FCi_id0 - ... - point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS", "None for id in points_dict: if scene_points is None: scene_points", "string IDs of all of the point clouds. This is", "common frame F. The system returns one point cloud combining", "def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy of points. points_h_Ci", "scene_points = None scene_colors = None for id in points_dict:", "self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda:", "RGBs are optional. If absent, those points will be the", "in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort(", "= Fields(BaseField.kXYZs | BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id] =", "- point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS \"\"\" def __init__(self,", "colors) def DoCalcOutput(self, context, output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1])", "import LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy of", "all of the point clouds. This is often the serial", "the transformed point clouds. Each point cloud must have XYZs.", "should be between 0 and 255. The default is white.", "system returns one point cloud combining all of the transformed", "camera they came from, such as \"1\" for a simulated", "of points. 
points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :]", "scene_points = scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0, :]]", "def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors = None for", "- ... - point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS \"\"\"", "\"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make(", "of the point clouds. This is often the serial number", "scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" ..", "one point cloud combining all of the transformed point clouds.", "manual broadcasting. return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points", "X_FCi_idN output_ports: - point_cloud_FS \"\"\" def __init__(self, id_list, default_rgb=[255., 255.,", "= self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if", "is often the serial number of the camera they came", "or \"805212060373\" for a real camera. @param default_rgb A list", "AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points = {} colors", "self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(),", "default is white. 
\"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports =", "points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color,", "PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 -", "default color. @param id_list A list containing the string IDs", "from pydrake.perception import BaseField, Fields, PointCloud from pydrake.systems.framework import LeafSystem", "Make homogeneous copy of points. points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1]))))", "LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports = {} self._id_list = id_list", "points_dict: if scene_points is None: scene_points = points_dict[id] else: scene_points", "pydrake.math import RigidTransform from pydrake.perception import BaseField, Fields, PointCloud from", "pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy", "is None: scene_points = points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points))", "self._point_cloud_ports = {} self._transform_ports = {} self._id_list = id_list self._default_rgb", "absent, those points will be the provided default color. @param", "\"\"\" def __init__(self, id_list, default_rgb=[255., 255., 255.]): \"\"\" A system", "scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] = scene_points output.get_mutable_value().mutable_rgbs()[:] =", "frame Ci to F, to put each point cloud in", "colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points", "all of the transformed point clouds. 
Each point cloud must", "Fields, PointCloud from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi): #", "PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 - ... - point_cloud_CiSi_idN", "IDs of all of the point clouds. This is often", "are optional. If absent, those points will be the provided", "= scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\"", "if scene_points is None: scene_points = points_dict[id] else: scene_points =", "from frame Ci to F, to put each point cloud", "pydrake.perception import BaseField, Fields, PointCloud from pydrake.systems.framework import LeafSystem def", "from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous", "point_cloud_CiSi_id0 - X_FCi_id0 - ... - point_cloud_CiSi_idN - X_FCi_idN output_ports:", "scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:,", "\"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context):", "LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy of points.", "_TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id]", "scene_colors is None: scene_colors = colors_dict[id] else: scene_colors = np.hstack((colors_dict[id],", "self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points =", "Values should be between 0 and 255. 
The default is", "in N point clouds of points Si in frame Ci,", "self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity()))", "scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0, :]] return scene_points,", "points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors is None:", "context, output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] = scene_points", "None: scene_points = points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points)) if", "255., 255.]): \"\"\" A system that takes in N point", "clouds. This is often the serial number of the camera", "# Make homogeneous copy of points. points_h_Ci = np.vstack((points_Ci, np.ones((1,", "N RigidTransforms from frame Ci to F, to put each", "class PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0", ".. pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 -", "@param default_rgb A list of length 3 containing the RGB", "\"\"\" A system that takes in N point clouds of", "return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): # Need manual broadcasting.", "scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]] scene_colors", "output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id]", "as \"1\" for a simulated camera or \"805212060373\" for a", "- X_FCi_id0 - ... - point_cloud_CiSi_idN - X_FCi_idN output_ports: -", "will be the provided default color. 
@param id_list A list", "each point cloud in a common frame F. The system", "returns one point cloud combining all of the transformed point", "name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 - ... -", "color. @param id_list A list containing the string IDs of", "RGB values to use in the absence of PointCloud.rgbs. Values", "optional. If absent, those points will be the provided default", "self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context,", "PointCloud from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make", "return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system:: name: PointCloudConcatenation", "None scene_colors = None for id in points_dict: if scene_points", "clouds of points Si in frame Ci, and N RigidTransforms", "scene_colors = scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem):", "numpy as np from pydrake.common.value import AbstractValue from pydrake.math import", "If absent, those points will be the provided default color.", "F. The system returns one point cloud combining all of", "in a common frame F. The system returns one point", "real camera. @param default_rgb A list of length 3 containing", "point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output): scene_points, scene_colors", "in points_dict: if scene_points is None: scene_points = points_dict[id] else:", "X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4())", "point clouds. This is often the serial number of the", "and N RigidTransforms from frame Ci to F, to put", "points will be the provided default color. 
@param id_list A", "DoCalcOutput(self, context, output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] =", "_ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors = None for id", "pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 - ...", "BaseField, Fields, PointCloud from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi):", "PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points = {} colors =", "_TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy of points. points_h_Ci =", "Si in frame Ci, and N RigidTransforms from frame Ci", "id in points_dict: if scene_points is None: scene_points = points_dict[id]", "= {} colors = {} for id in self._id_list: point_cloud", "for a simulated camera or \"805212060373\" for a real camera.", "return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output): scene_points, scene_colors =", "dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors = None", "scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" .. 
pydrake_system:: name: PointCloudConcatenation input_ports: -", "such as \"1\" for a simulated camera or \"805212060373\" for", "= np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim):", "np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0,", "system that takes in N point clouds of points Si", "from pydrake.common.value import AbstractValue from pydrake.math import RigidTransform from pydrake.perception", "The system returns one point cloud combining all of the", "<reponame>RobotLocomotion/drake-python3.7 import numpy as np from pydrake.common.value import AbstractValue from", "np.hstack((points_dict[id], scene_points)) if scene_colors is None: scene_colors = colors_dict[id] else:", "return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None", "from, such as \"1\" for a simulated camera or \"805212060373\"", "\"805212060373\" for a real camera. @param default_rgb A list of", "= colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points))", "to put each point cloud in a common frame F.", "is None: scene_colors = colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors))", "input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 - ... - point_cloud_CiSi_idN -", "pydrake.common.value import AbstractValue from pydrake.math import RigidTransform from pydrake.perception import", "homogeneous copy of points. points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return", "frame Ci, and N RigidTransforms from frame Ci to F,", "simulated camera or \"805212060373\" for a real camera. @param default_rgb", "colors_dict): scene_points = None scene_colors = None for id in", "the provided default color. 
@param id_list A list containing the", "X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): # Need manual broadcasting. return", "output_ports: - point_cloud_FS \"\"\" def __init__(self, id_list, default_rgb=[255., 255., 255.]):", "default_rgb A list of length 3 containing the RGB values", "a common frame F. The system returns one point cloud", "number of the camera they came from, such as \"1\"", "= _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else:", "= point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points,", "takes in N point clouds of points Si in frame", "This is often the serial number of the camera they", "| BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id),", "else: scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors is None: scene_colors", "of PointCloud.rgbs. Values should be between 0 and 255. The", "X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id] = _TileColors(", "frame F. The system returns one point cloud combining all", "= {} for id in self._id_list: point_cloud = self.EvalAbstractInput( context,", "points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs()", "else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points =", "... - point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS \"\"\" def", "point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1])", "\"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports = {} self._id_list =", "be between 0 and 255. 
The default is white. \"\"\"", "output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] = scene_points output.get_mutable_value().mutable_rgbs()[:]", "= {} self._transform_ports = {} self._id_list = id_list self._default_rgb =", "{} colors = {} for id in self._id_list: point_cloud =", "if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb,", "to F, to put each point cloud in a common", "= np.hstack((points_dict[id], scene_points)) if scene_colors is None: scene_colors = colors_dict[id]", "points Si in frame Ci, and N RigidTransforms from frame", "point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors)", "- point_cloud_CiSi_id0 - X_FCi_id0 - ... - point_cloud_CiSi_idN - X_FCi_idN", "XYZs. RGBs are optional. If absent, those points will be", "valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]] scene_colors =", "came from, such as \"1\" for a simulated camera or", "in frame Ci, and N RigidTransforms from frame Ci to", "{} self._id_list = id_list self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs", "point clouds. Each point cloud must have XYZs. RGBs are", "of all of the point clouds. This is often the", "scene_colors = None for id in points_dict: if scene_points is", "point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS \"\"\" def __init__(self, id_list,", "0 and 255. The default is white. \"\"\" LeafSystem.__init__(self) self._point_cloud_ports", "context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints(", "absence of PointCloud.rgbs. 
Values should be between 0 and 255.", "= id_list self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)", "scene_points = points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors", "The default is white. \"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports", "a real camera. @param default_rgb A list of length 3", "self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output): scene_points,", "valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors", "= self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\",", "F, to put each point cloud in a common frame", "broadcasting. 
return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points =", "self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] =", "self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self,", "of the camera they came from, such as \"1\" for", "self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs():", "default_rgb=[255., 255., 255.]): \"\"\" A system that takes in N", "of points Si in frame Ci, and N RigidTransforms from", "self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] =", "as np from pydrake.common.value import AbstractValue from pydrake.math import RigidTransform", "id_list, default_rgb=[255., 255., 255.]): \"\"\" A system that takes in", "= np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0,", "valid_indices[0, :]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system::", "the RGB values to use in the absence of PointCloud.rgbs.", "colors[id] = point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return", "camera. 
@param default_rgb A list of length 3 containing the", "def DoCalcOutput(self, context, output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:]", "_AlignPointClouds(self, context): points = {} colors = {} for id", "point cloud must have XYZs. RGBs are optional. If absent,", "the absence of PointCloud.rgbs. Values should be between 0 and", "RigidTransform from pydrake.perception import BaseField, Fields, PointCloud from pydrake.systems.framework import", "= None scene_colors = None for id in points_dict: if", "= np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for id in", "often the serial number of the camera they came from,", "dim): # Need manual broadcasting. return np.tile(np.array([color]).T, (1, dim)) def", "lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points = {}", "self._id_list = id_list self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs |", "id in self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi =", "cloud in a common frame F. The system returns one", "point cloud combining all of the transformed point clouds. Each", "point clouds of points Si in frame Ci, and N", "cloud combining all of the transformed point clouds. Each point", "points. points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def", "for id in points_dict: if scene_points is None: scene_points =", "of length 3 containing the RGB values to use in", "np from pydrake.common.value import AbstractValue from pydrake.math import RigidTransform from", "A list containing the string IDs of all of the", "in the absence of PointCloud.rgbs. Values should be between 0", "of the transformed point clouds. 
Each point cloud must have", "= None for id in points_dict: if scene_points is None:", "np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): #", "= scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0, :]] return", "{} for id in self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value()", "X_CiSi): # Make homogeneous copy of points. points_h_Ci = np.vstack((points_Ci,", "np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): # Need", "serial number of the camera they came from, such as", "_TileColors(color, dim): # Need manual broadcasting. return np.tile(np.array([color]).T, (1, dim))", "AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)),", "Ci, and N RigidTransforms from frame Ci to F, to", "the string IDs of all of the point clouds. This", "None: scene_colors = colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices", "{} self._transform_ports = {} self._id_list = id_list self._default_rgb = np.array(default_rgb)", ":]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system:: name:", "clouds. Each point cloud must have XYZs. RGBs are optional.", "white. \"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports = {} self._id_list", "id_list A list containing the string IDs of all of", "- point_cloud_FS \"\"\" def __init__(self, id_list, default_rgb=[255., 255., 255.]): \"\"\"", "255. The default is white. 
\"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {}", "_TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output):", "scene_colors = colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices =", "id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] =", "import numpy as np from pydrake.common.value import AbstractValue from pydrake.math", "Fields(BaseField.kXYZs | BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort(", "scene_points, scene_colors class PointCloudConcatenation(LeafSystem): \"\"\" .. pydrake_system:: name: PointCloudConcatenation input_ports:", "__init__(self, id_list, default_rgb=[255., 255., 255.]): \"\"\" A system that takes", "self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id),", "from pydrake.math import RigidTransform from pydrake.perception import BaseField, Fields, PointCloud", "containing the string IDs of all of the point clouds.", "values to use in the absence of PointCloud.rgbs. Values should", ":]] scene_colors = scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors class", "RigidTransforms from frame Ci to F, to put each point", "camera or \"805212060373\" for a real camera. @param default_rgb A", "import RigidTransform from pydrake.perception import BaseField, Fields, PointCloud from pydrake.systems.framework", "point cloud in a common frame F. The system returns", ":] def _TileColors(color, dim): # Need manual broadcasting. 
return np.tile(np.array([color]).T,", "else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def", "self.DoCalcOutput) def _AlignPointClouds(self, context): points = {} colors = {}", "they came from, such as \"1\" for a simulated camera", "must have XYZs. RGBs are optional. If absent, those points", "= {} self._id_list = id_list self._default_rgb = np.array(default_rgb) output_fields =", "255.]): \"\"\" A system that takes in N point clouds", "list containing the string IDs of all of the point", "scene_points is None: scene_points = points_dict[id] else: scene_points = np.hstack((points_dict[id],", "be the provided default color. @param id_list A list containing", "self._transform_ports = {} self._id_list = id_list self._default_rgb = np.array(default_rgb) output_fields", "scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors is None: scene_colors =", "copy of points. points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3,", "def _TileColors(color, dim): # Need manual broadcasting. return np.tile(np.array([color]).T, (1,", "the point clouds. This is often the serial number of", "\"1\" for a simulated camera or \"805212060373\" for a real", "= points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors is", "= np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:,", "colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self,", "PointCloud.rgbs. Values should be between 0 and 255. 
The default", "self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for id", "= self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id]", "points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): # Need manual", "combining all of the transformed point clouds. Each point cloud", "containing the RGB values to use in the absence of", "import AbstractValue from pydrake.math import RigidTransform from pydrake.perception import BaseField,", "that takes in N point clouds of points Si in", "np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for id in self._id_list:", "to use in the absence of PointCloud.rgbs. Values should be", "@param id_list A list containing the string IDs of all", "3 containing the RGB values to use in the absence", "point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id] =", "id_list self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for", "points = {} colors = {} for id in self._id_list:", "scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] = scene_points output.get_mutable_value().mutable_rgbs()[:] = scene_colors", "N point clouds of points Si in frame Ci, and", "np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]]", "for id in self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi", "those points will be the provided default color. 
@param id_list", "scene_points)) if scene_colors is None: scene_colors = colors_dict[id] else: scene_colors", "a simulated camera or \"805212060373\" for a real camera. @param", "point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value()", "_ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output): scene_points, scene_colors = self._AlignPointClouds(context)", "= _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context,", "have XYZs. RGBs are optional. If absent, those points will", "context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id]", "transformed point clouds. Each point cloud must have XYZs. RGBs", "A list of length 3 containing the RGB values to", "list of length 3 containing the RGB values to use", "def __init__(self, id_list, default_rgb=[255., 255., 255.]): \"\"\" A system that", "use in the absence of PointCloud.rgbs. Values should be between", "if scene_colors is None: scene_colors = colors_dict[id] else: scene_colors =", "the camera they came from, such as \"1\" for a", "self._transform_ports[id] = self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput)", "Need manual broadcasting. 
return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict):", "in self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput(", "for id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id]", "BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( \"point_cloud_CiSi_{}\".format(id), AbstractValue.Make(PointCloud(fields=output_fields)))", "is white. \"\"\" LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports = {}", "AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points", "colors = {} for id in self._id_list: point_cloud = self.EvalAbstractInput(", "provided default color. @param id_list A list containing the string", "import BaseField, Fields, PointCloud from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci,", "- X_FCi_idN output_ports: - point_cloud_FS \"\"\" def __init__(self, id_list, default_rgb=[255.,", "cloud must have XYZs. RGBs are optional. If absent, those", "point_cloud_FS \"\"\" def __init__(self, id_list, default_rgb=[255., 255., 255.]): \"\"\" A", "for a real camera. @param default_rgb A list of length", "\"\"\" .. pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0", "Each point cloud must have XYZs. RGBs are optional. If", "A system that takes in N point clouds of points", "the serial number of the camera they came from, such", "between 0 and 255. The default is white. 
\"\"\" LeafSystem.__init__(self)", "length 3 containing the RGB values to use in the", "def _AlignPointClouds(self, context): points = {} colors = {} for", "AbstractValue from pydrake.math import RigidTransform from pydrake.perception import BaseField, Fields,", "# Need manual broadcasting. return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict,", "put each point cloud in a common frame F. The", "context): points = {} colors = {} for id in", "and 255. The default is white. \"\"\" LeafSystem.__init__(self) self._point_cloud_ports =", "(1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors =", "= self.DeclareAbstractInputPort( \"X_FCi_{}\".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort(\"point_cloud_FS\", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def", "np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors" ]
[ "= female_first_names[random.randint(0,len(female_first_names)-1)] catanDBObj.person_bio.sex = 'female' catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)] catanDBObj.person_bio.age =", "-71.114484, -71.084422) cmd = ('SELECT ' 'db_person_bio.person_id, ' 'db_person_bio.origin_node_id, '", "cmd + \"(%d, %.6f, %.6f, %.6f, %.6f, %.6f)\" % (i,", "= random.randint(0,1) if sex == 0: # male catanDBObj.person_bio.name_given =", "'db_person_bio.person_id, ' 'db_person_bio.origin_node_id, ' 'db_person_bio.name_family, ' 'db_person_bio.name_given, ' 'db_person_bio.age, '", "first names file f = open('dist.female.first','r') female_first_names = [name.strip().split()[0] for", "%.6f, %.6f, %.6f)\" % (i, lat, lng, 0, 0, 0)", "(message, status, location, etc.) # location lat = round(random.uniform(start_lat, stop_lat),", "male/female ratio between 5 and 90 years of age \"\"\"", "\"\"\" Generates n people, random male/female ratio between 5 and", "for name in f.readlines()] f.close() # open female first names", "for cambridge gen_nodes(32, db, 42.354823, 42.368315, -71.114484, -71.084422) gen_people(100, db,", "= random.randint(5,90) # message (message, status, location, etc.) # location", "for the database # it only cares about the data", "female catanDBObj.person_bio.name_given = female_first_names[random.randint(0,len(female_first_names)-1)] catanDBObj.person_bio.sex = 'female' catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)]", "i in range(n): # random lat, long lat = round(random.uniform(start_lat,", "file f = open('dist.all.last','r') family_names = [name.strip().split()[0] for name in", "%.6f, %.6f, %.6f, %.6f, %.6f)\" % (i, lat, lng, 0,", "# insert some test nodes # for cambridge gen_nodes(32, db,", "for i in range(n): # random lat, long lat =", "message (message, status, location, etc.) 
# location lat = round(random.uniform(start_lat,", "'db_person_bio.origin_node_id, ' 'db_person_bio.name_family, ' 'db_person_bio.name_given, ' 'db_person_bio.age, ' 'db_person_bio.sex, '", "people, random male/female ratio between 5 and 90 years of", "42.354823, 42.368315, -71.114484, -71.084422) gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422)", "'db_person_bio.name_given, ' 'db_person_bio.age, ' 'db_person_bio.sex, ' 'db_person_messages.submission_id, ' 'db_person_messages.origin_node_id, '", "import argparse import random import catan.db from catan.data import NodeMessage", "lat = round(random.uniform(start_lat, stop_lat), 6) lng = round(random.uniform(start_long, stop_long), 6)", "= open('dist.all.last','r') family_names = [name.strip().split()[0] for name in f.readlines()] f.close()", "= 'female' catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)] catanDBObj.person_bio.age = random.randint(5,90) # message", "open last names file f = open('dist.all.last','r') family_names = [name.strip().split()[0]", "'db_person_bio.name_family, ' 'db_person_bio.name_given, ' 'db_person_bio.age, ' 'db_person_bio.sex, ' 'db_person_messages.submission_id, '", "data and source fields, so we can ignore other fields", "catanDBObj.person_bio.sex = 'female' catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)] catanDBObj.person_bio.age = random.randint(5,90) #", "r in db._sql(cmd).fetchall(): print r def main(args): pass if __name__=='__main__':", "in range(n): # random lat, long lat = round(random.uniform(start_lat, stop_lat),", "in f.readlines()] f.close() # open female first names file f", "nmsg.data = catanDBObj.pack() db.update_db(nmsg) # Create some random updates for", "# nodes def gen_nodes(n, db, start_lat, stop_lat, start_long, stop_long): assert", "> 0 # open male first names file f =", "catanDBObj.person_message.status_gps_latitude = lat catanDBObj.person_message.status_gps_longitude = 
lng catanDBObj.person_message.status_gps_accuracy = 0 n", "-71.114484, -71.084422) gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422) cmd =", "0 cmd = \"INSERT INTO catan_nodes VALUES \" # generate", "between 5 and 90 years of age \"\"\" assert n", "('SELECT ' 'db_person_bio.person_id, ' 'db_person_bio.origin_node_id, ' 'db_person_bio.name_family, ' 'db_person_bio.name_given, '", "database # it only cares about the data and source", "fields, so we can ignore other fields nmsg = NodeMessage()", "data STATUS_LIST = ['ok', 'injured', 'deceased'] # nodes def gen_nodes(n,", "gen_people(n, db, start_lat, stop_lat, start_long, stop_long): \"\"\" Generates n people,", "' 'db_person_bio.name_given, ' 'db_person_bio.age, ' 'db_person_bio.sex, ' 'db_person_messages.submission_id, ' 'db_person_messages.origin_node_id,", "from catan.data import NodeMessage # test data STATUS_LIST = ['ok',", "db_person_messages.person_id = db_person_bio.person_id ' 'LEFT JOIN db_submitter_info ON db_submitter_info.submission_id =", "catanDBObj = catan.db.CatanDatabaseObject() # bio sex = random.randint(0,1) if sex", "NodeMessage() nmsg.source = random.randint(0,31) # random node 0-31 nmsg.data =", "import catan.db from catan.data import NodeMessage # test data STATUS_LIST", "' 'FROM db_person_bio ' 'LEFT JOIN db_person_messages ON db_person_messages.person_id =", "lng, 0, 0, 0) db._sql(sql_cmd) # people def gen_people(n, db,", "assert n > 0 # open male first names file", "i # location lat = round(random.uniform(start_lat, stop_lat), 6) lng =", "# generate a NodeMessage for the database # it only", "'db_submitter_info.timestamp ' 'FROM db_person_bio ' 'LEFT JOIN db_person_messages ON db_person_messages.person_id", "'LEFT JOIN db_person_messages ON db_person_messages.person_id = db_person_bio.person_id ' 'LEFT JOIN", "gen_nodes(n, db, start_lat, stop_lat, start_long, stop_long): assert n > 0", "in f.readlines()] f.close() # open last names file f =", "'female' 
def gen_nodes(n, db, start_lat, stop_lat, start_long, stop_long):
    """Insert n synthetic rows into the catan_nodes table.

    Each node receives a GPS position drawn uniformly from the given
    lat/long bounding box; accuracy, path and timestamp are zeroed.

    Args:
        n: number of nodes to create (must be > 0)
        db: database object exposing a _sql(cmd) method
        start_lat, stop_lat: latitude bounds for random placement
        start_long, stop_long: longitude bounds for random placement
    """
    assert n > 0
    insert_prefix = "INSERT INTO catan_nodes VALUES "
    for node_id in range(n):
        # Random position inside the bounding box, 6 decimal places.
        node_lat = round(random.uniform(start_lat, stop_lat), 6)
        node_lng = round(random.uniform(start_long, stop_long), 6)
        # Column order: node_id, gps_lat, gps_long, gps_acc, path, timestamp
        row = "(%d, %.6f, %.6f, %.6f, %.6f, %.6f)" % (node_id, node_lat, node_lng, 0, 0, 0)
        db._sql(insert_prefix + row)
def _load_first_column(path):
    """Return the first whitespace-separated token of each non-blank line.

    The census distribution files carry the name in the first column;
    blank lines (which would crash a bare split()[0]) are skipped.
    """
    with open(path, 'r') as f:
        return [line.split()[0] for line in f if line.strip()]


def gen_people(n, db, start_lat, stop_lat, start_long, stop_long):
    """
    Generates n people, random male/female ratio between 5 and 90 years of age.

    Each person gets a random bio plus an initial 'Hi Mom' status message
    placed inside the given lat/long bounding box; afterwards roughly half
    of them get one follow-up location update.

    Args:
        n: number of people to create (must be > 0)
        db: database object exposing update_db()
        start_lat, stop_lat: latitude bounds for GPS fixes
        start_long, stop_long: longitude bounds for GPS fixes
    """
    assert n > 0

    # Name pools from the census distribution files; files are now closed
    # deterministically via the context manager in _load_first_column.
    male_first_names = _load_first_column('dist.male.first')
    female_first_names = _load_first_column('dist.female.first')
    family_names = _load_first_column('dist.all.last')

    # generate people
    for i in range(n):
        catanDBObj = catan.db.CatanDatabaseObject()

        # bio: random sex, name and age
        if random.randint(0, 1) == 0:  # male
            catanDBObj.person_bio.name_given = random.choice(male_first_names)
            catanDBObj.person_bio.sex = 'male'
        else:  # female
            catanDBObj.person_bio.name_given = random.choice(female_first_names)
            catanDBObj.person_bio.sex = 'female'
        catanDBObj.person_bio.name_family = random.choice(family_names)
        catanDBObj.person_bio.age = random.randint(5, 90)

        # message (message, status, location, etc.)
        lat = round(random.uniform(start_lat, stop_lat), 6)
        lng = round(random.uniform(start_long, stop_long), 6)
        catanDBObj.person_message.person_message = 'Hi Mom'
        catanDBObj.person_message.status_gps_latitude = lat
        catanDBObj.person_message.status_gps_longitude = lng
        catanDBObj.person_message.status_gps_accuracy = 0

        # status
        catanDBObj.person_message.status = random.choice(STATUS_LIST)
        catanDBObj.person_message.status_location = 'Test status location'

        # generate a NodeMessage for the database; it only cares about the
        # data and source fields, so other fields stay at their defaults
        nmsg = NodeMessage()
        nmsg.source = random.randint(0, 31)  # random node 0-31
        nmsg.data = catanDBObj.pack()
        db.update_db(nmsg)

    # Create some random follow-up updates for roughly half the people.
    for person_id in range(1, n + 1):
        if random.randint(0, 1) != 0:
            continue
        catanDBObj = catan.db.CatanDatabaseObject()
        catanDBObj.person_id = person_id

        # location
        lat = round(random.uniform(start_lat, stop_lat), 6)
        lng = round(random.uniform(start_long, stop_long), 6)
        catanDBObj.person_message.person_message = 'Location update 1'
        catanDBObj.person_message.status_gps_latitude = lat
        catanDBObj.person_message.status_gps_longitude = lng
        catanDBObj.person_message.status_gps_accuracy = 0

        # BUGFIX: the original rebound ``n = NodeMessage()`` here, shadowing
        # the ``n`` parameter; use a dedicated name for the outgoing message.
        nmsg = NodeMessage()
        nmsg.source = random.randint(0, 31)
        nmsg.data = catanDBObj.pack()
        db.update_db(nmsg)
def populate_db():
    """Create a fresh database, fill it with test nodes and people,
    then dump the joined person/message/submitter rows for inspection.

    Uses a bounding box covering Cambridge, MA for all GPS positions.
    """
    db = catan.db.CatanDatabase(0)

    # insert some test nodes
    # for cambridge
    gen_nodes(32, db, 42.354823, 42.368315, -71.114484, -71.084422)
    gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422)

    cmd = ('SELECT '
           'db_person_bio.person_id, '
           'db_person_bio.origin_node_id, '
           'db_person_bio.name_family, '
           'db_person_bio.name_given, '
           'db_person_bio.age, '
           'db_person_bio.sex, '
           'db_person_messages.submission_id, '
           'db_person_messages.origin_node_id, '
           'db_person_messages.status_gps_latitude, '
           'db_person_messages.status_gps_longitude, '
           'db_person_messages.status_gps_accuracy, '
           'db_person_messages.status, '
           'db_person_messages.status_location, '
           'db_submitter_info.timestamp '
           'FROM db_person_bio '
           'LEFT JOIN db_person_messages ON db_person_messages.person_id = db_person_bio.person_id '
           'LEFT JOIN db_submitter_info ON db_submitter_info.submission_id = db_person_messages.submission_id')

    for r in db._sql(cmd).fetchall():
        # Parenthesized print of a single expression behaves identically on
        # Python 2 and fixes the SyntaxError on Python 3 ("print r").
        print(r)
db.update_db(nmsg)", "1' catanDBObj.person_message.status_gps_latitude = lat catanDBObj.person_message.status_gps_longitude = lng catanDBObj.person_message.status_gps_accuracy = 0", "'db_person_bio.sex, ' 'db_person_messages.submission_id, ' 'db_person_messages.origin_node_id, ' 'db_person_messages.status_gps_latitude, ' 'db_person_messages.status_gps_longitude, '", "= 'male' else: # female catanDBObj.person_bio.name_given = female_first_names[random.randint(0,len(female_first_names)-1)] catanDBObj.person_bio.sex =", "42.368315, -71.114484, -71.084422) gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422) cmd", "[name.strip().split()[0] for name in f.readlines()] f.close() # generate people for", "update = random.randint(0,1) if update == 0: catanDBObj = catan.db.CatanDatabaseObject()", "for name in f.readlines()] f.close() # open last names file", "stop_lat, start_long, stop_long): assert n > 0 cmd = \"INSERT", "people for i in range(n): catanDBObj = catan.db.CatanDatabaseObject() # bio", "= catanDBObj.pack() db.update_db(n) def populate_db(): db = catan.db.CatanDatabase(0) # insert", "random nodes, centered around Cambridge for i in range(n): #", "= NodeMessage() n.source = random.randint(0,31) n.data = catanDBObj.pack() db.update_db(n) def", "db_person_messages ON db_person_messages.person_id = db_person_bio.person_id ' 'LEFT JOIN db_submitter_info ON", "generate a NodeMessage for the database # it only cares", "female_first_names[random.randint(0,len(female_first_names)-1)] catanDBObj.person_bio.sex = 'female' catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)] catanDBObj.person_bio.age = random.randint(5,90)", "around Cambridge for i in range(n): # random lat, long", "= lng catanDBObj.person_message.status_gps_accuracy = 0 # status catanDBObj.person_message.status = STATUS_LIST[random.randint(0,len(STATUS_LIST)-1)]", "some test nodes # for cambridge gen_nodes(32, db, 42.354823, 42.368315,", "Generates n people, 
random male/female ratio between 5 and 90", "centered around Cambridge for i in range(n): # random lat,", "male catanDBObj.person_bio.name_given = male_first_names[random.randint(0,len(male_first_names)-1)] catanDBObj.person_bio.sex = 'male' else: # female" ]
[ "# if x + 1 < m and grid[x+1][y] ==", "grid: List[List[str]] :rtype: int \"\"\" def sink(i, j): if 0", "from discussion. # The following is another easy understanding idea:", "self.island(i, j, grid, m, n) # return res # def", "of islands. An island is surrounded by water and is", "# # Example 2: # 11000 # 11000 # 00100", "surrounded by water and is formed by connecting adjacent lands", "i-1, i, i), (j, j, j+1, j-1)) return 1 return", "grid[x-1][y] = '2' # self.island(x-1,y,grid, m, n) # if y", "discussion. # The following is another easy understanding idea: #", "00011 # Answer: 3 # # Version: 1.0 # 11/13/17", "Version: 1.0 # 11/13/17 by Jianfa # ------------------------------ class Solution(object):", "# def numIslands(self, grid): # \"\"\" # :type grid: List[List[str]]", "len(grid) == 0: return 0 # m = len(grid) #", "== '1': # grid[x-1][y] = '2' # self.island(x-1,y,grid, m, n)", "'1': # grid[x-1][y] = '2' # self.island(x-1,y,grid, m, n) #", "# \"\"\" # :type grid: List[List[str]] # :rtype: int #", "len(grid) and 0 <= j < len(grid[0]) and grid[i][j] ==", "are all surrounded by water. # # Example 1: #", "j-1)) return 1 return 0 return sum(sink(i, j) for i", "n): # if x + 1 < m and grid[x+1][y]", "the number of islands. An island is surrounded by water", "+= 1 # grid[i][j] = '2' # self.island(i, j, grid,", "# :type grid: List[List[str]] # :rtype: int # \"\"\" #", "lands horizontally or vertically. You may assume all four edges", "11110 # 11010 # 11000 # 00000 # Answer: 1", "# 200. Number of Islands # # Description: # Given", "len(grid[0]) # res = 0 # for i in range(m):", "if grid[i][j] == '1': # res += 1 # grid[i][j]", "# Summary: # Copied from discussion. 
# The following is", "\"\"\" :type grid: List[List[str]] :rtype: int \"\"\" def sink(i, j):", "grid[x+1][y] == '1': # grid[x+1][y] = '2' # self.island(x+1,y,grid, m,", "of Islands # # Description: # Given a 2d grid", "# if x -1 >=0 and grid[x-1][y] == '1': #", "m, n) # if y + 1 < n and", "count the number of islands. An island is surrounded by", "return 0 # m = len(grid) # n = len(grid[0])", "if y - 1 >= 0 and grid[x][y-1] == '1':", "return sum(sink(i, j) for i in range(len(grid)) for j in", "j < len(grid[0]) and grid[i][j] == \"1\": grid[i][j] = \"0\"", "0 <= j < len(grid[0]) and grid[i][j] == \"1\": grid[i][j]", "# # Example 1: # 11110 # 11010 # 11000", "\"0\" map(sink, (i+1, i-1, i, i), (j, j, j+1, j-1))", "# # class Solution(object): # def numIslands(self, grid): # \"\"\"", "grid): \"\"\" :type grid: List[List[str]] :rtype: int \"\"\" def sink(i,", "grid[x][y+1] == '1': # grid[x][y+1] = '2' # self.island(x,y+1,grid, m,", "sink(i, j): if 0 <= i < len(grid) and 0", "= len(grid[0]) # res = 0 # for i in", "'1': # grid[x+1][y] = '2' # self.island(x+1,y,grid, m, n) #", "(i+1, i-1, i, i), (j, j, j+1, j-1)) return 1", "connecting adjacent lands horizontally or vertically. 
You may assume all", "0 # m = len(grid) # n = len(grid[0]) #", "2: # 11000 # 11000 # 00100 # 00011 #", "Description: # Given a 2d grid map of '1's (land)", "i), (j, j, j+1, j-1)) return 1 return 0 return", "# 11/13/17 by Jianfa # ------------------------------ class Solution(object): def numIslands(self,", "# 11110 # 11010 # 11000 # 00000 # Answer:", ":type grid: List[List[str]] :rtype: int \"\"\" def sink(i, j): if", "# class Solution(object): # def numIslands(self, grid): # \"\"\" #", "grid[x][y+1] = '2' # self.island(x,y+1,grid, m, n) # if x", "grid[i][j] == \"1\": grid[i][j] = \"0\" map(sink, (i+1, i-1, i,", "Example 1: # 11110 # 11010 # 11000 # 00000", "0 return sum(sink(i, j) for i in range(len(grid)) for j", "# for i in range(m): # for j in range(n):", "Jianfa # ------------------------------ class Solution(object): def numIslands(self, grid): \"\"\" :type", "Islands # # Description: # Given a 2d grid map", "m, n) # if y - 1 >= 0 and", "of the grid are all surrounded by water. # #", "'1's (land) and '0's (water), count the number of islands.", "self.island(x+1,y,grid, m, n) # if y + 1 < n", "number of islands. An island is surrounded by water and", "horizontally or vertically. You may assume all four edges of", "'2' # self.island(i, j, grid, m, n) # return res", "# ------------------------------ # 200. Number of Islands # # Description:", "= 0 # for i in range(m): # for j", "n and grid[x][y+1] == '1': # grid[x][y+1] = '2' #", "a 2d grid map of '1's (land) and '0's (water),", ":type grid: List[List[str]] # :rtype: int # \"\"\" # if", "in range(m): # for j in range(n): # if grid[i][j]", "res # def island(self, x, y, grid, m, n): #", "200. Number of Islands # # Description: # Given a", "00000 # Answer: 1 # # Example 2: # 11000", "# self.island(x-1,y,grid, m, n) # if y - 1 >=", "List[List[str]] :rtype: int \"\"\" def sink(i, j): if 0 <=", "# for j in range(n): # if grid[i][j] == '1':", "by water. 
# # Example 1: # 11110 # 11010", "return 1 return 0 return sum(sink(i, j) for i in", "class Solution(object): # def numIslands(self, grid): # \"\"\" # :type", "# 00000 # Answer: 1 # # Example 2: #", "00100 # 00011 # Answer: 3 # # Version: 1.0", "0: return 0 # m = len(grid) # n =", "# return res # def island(self, x, y, grid, m,", "# :rtype: int # \"\"\" # if len(grid) == 0:", "y - 1 >= 0 and grid[x][y-1] == '1': #", "i in range(m): # for j in range(n): # if", "List[List[str]] # :rtype: int # \"\"\" # if len(grid) ==", "n) # return res # def island(self, x, y, grid,", "1 < m and grid[x+1][y] == '1': # grid[x+1][y] =", "assume all four edges of the grid are all surrounded", "def sink(i, j): if 0 <= i < len(grid) and", "n) # if y + 1 < n and grid[x][y+1]", "of '1's (land) and '0's (water), count the number of", "in range(len(grid)) for j in range(len(grid[i]))) # ------------------------------ # Summary:", "Solution(object): # def numIslands(self, grid): # \"\"\" # :type grid:", "grid: List[List[str]] # :rtype: int # \"\"\" # if len(grid)", "numIslands(self, grid): \"\"\" :type grid: List[List[str]] :rtype: int \"\"\" def", "'2' # self.island(x-1,y,grid, m, n) # if y - 1", "m = len(grid) # n = len(grid[0]) # res =", "\"1\": grid[i][j] = \"0\" map(sink, (i+1, i-1, i, i), (j,", "# 00011 # Answer: 3 # # Version: 1.0 #", "# Version: 1.0 # 11/13/17 by Jianfa # ------------------------------ class", "'0's (water), count the number of islands. 
An island is", "j, grid, m, n) # return res # def island(self,", "grid, m, n): # if x + 1 < m", "following is another easy understanding idea: # # class Solution(object):", "< m and grid[x+1][y] == '1': # grid[x+1][y] = '2'", "and grid[x+1][y] == '1': # grid[x+1][y] = '2' # self.island(x+1,y,grid,", "1 >= 0 and grid[x][y-1] == '1': # grid[x][y-1] =", "if len(grid) == 0: return 0 # m = len(grid)", "# grid[x+1][y] = '2' # self.island(x+1,y,grid, m, n) # if", "'2' # self.island(x,y+1,grid, m, n) # if x -1 >=0", "'1': # grid[x][y+1] = '2' # self.island(x,y+1,grid, m, n) #", "------------------------------ # Summary: # Copied from discussion. # The following", "# if len(grid) == 0: return 0 # m =", "# grid[i][j] = '2' # self.island(i, j, grid, m, n)", "# Description: # Given a 2d grid map of '1's", "for j in range(n): # if grid[i][j] == '1': #", "x -1 >=0 and grid[x-1][y] == '1': # grid[x-1][y] =", "= '2' # self.island(x-1,y,grid, m, n) # if y -", "the grid are all surrounded by water. # # Example", "self.island(x,y+1,grid, m, n) # if x -1 >=0 and grid[x-1][y]", "Example 2: # 11000 # 11000 # 00100 # 00011", "grid[x-1][y] == '1': # grid[x-1][y] = '2' # self.island(x-1,y,grid, m,", "grid[i][j] = \"0\" map(sink, (i+1, i-1, i, i), (j, j,", "# Answer: 3 # # Version: 1.0 # 11/13/17 by", "easy understanding idea: # # class Solution(object): # def numIslands(self,", "grid[i][j] = '2' # self.island(i, j, grid, m, n) #", "m, n): # if x + 1 < m and", ":rtype: int \"\"\" def sink(i, j): if 0 <= i", "(land) and '0's (water), count the number of islands. An", "is formed by connecting adjacent lands horizontally or vertically. You", "(j, j, j+1, j-1)) return 1 return 0 return sum(sink(i,", "# self.island(x+1,y,grid, m, n) # if y + 1 <", "# 11000 # 11000 # 00100 # 00011 # Answer:", "# # Description: # Given a 2d grid map of", "by connecting adjacent lands horizontally or vertically. 
You may assume", "# res = 0 # for i in range(m): #", "# 11000 # 00100 # 00011 # Answer: 3 #", "m and grid[x+1][y] == '1': # grid[x+1][y] = '2' #", "Answer: 3 # # Version: 1.0 # 11/13/17 by Jianfa", "if x -1 >=0 and grid[x-1][y] == '1': # grid[x-1][y]", "== '1': # grid[x][y+1] = '2' # self.island(x,y+1,grid, m, n)", "<= j < len(grid[0]) and grid[i][j] == \"1\": grid[i][j] =", "- 1 >= 0 and grid[x][y-1] == '1': # grid[x][y-1]", "n = len(grid[0]) # res = 0 # for i", "1 # # Example 2: # 11000 # 11000 #", "# Example 1: # 11110 # 11010 # 11000 #", "grid[i][j] == '1': # res += 1 # grid[i][j] =", "# Given a 2d grid map of '1's (land) and", "# self.island(x,y+1,grid, m, n) # if x -1 >=0 and", "== '1': # res += 1 # grid[i][j] = '2'", "An island is surrounded by water and is formed by", "def island(self, x, y, grid, m, n): # if x", "Answer: 1 # # Example 2: # 11000 # 11000", "11000 # 11000 # 00100 # 00011 # Answer: 3", "You may assume all four edges of the grid are", "= \"0\" map(sink, (i+1, i-1, i, i), (j, j, j+1,", "by water and is formed by connecting adjacent lands horizontally", "water. # # Example 1: # 11110 # 11010 #", "1 return 0 return sum(sink(i, j) for i in range(len(grid))", "and '0's (water), count the number of islands. An island", "# # Version: 1.0 # 11/13/17 by Jianfa # ------------------------------", "by Jianfa # ------------------------------ class Solution(object): def numIslands(self, grid): \"\"\"", "0 and grid[x][y-1] == '1': # grid[x][y-1] = '2' #", "may assume all four edges of the grid are all", "formed by connecting adjacent lands horizontally or vertically. You may", "m, n) # if x -1 >=0 and grid[x-1][y] ==", "<gh_stars>1-10 # ------------------------------ # 200. 
Number of Islands # #", "in range(len(grid[i]))) # ------------------------------ # Summary: # Copied from discussion.", "# ------------------------------ class Solution(object): def numIslands(self, grid): \"\"\" :type grid:", "# n = len(grid[0]) # res = 0 # for", "len(grid) # n = len(grid[0]) # res = 0 #", "in range(n): # if grid[i][j] == '1': # res +=", "# def island(self, x, y, grid, m, n): # if", "11/13/17 by Jianfa # ------------------------------ class Solution(object): def numIslands(self, grid):", "and grid[x-1][y] == '1': # grid[x-1][y] = '2' # self.island(x-1,y,grid,", "another easy understanding idea: # # class Solution(object): # def", "m, n) # return res # def island(self, x, y,", "Given a 2d grid map of '1's (land) and '0's", "grid are all surrounded by water. # # Example 1:", "= '2' # self.island(x+1,y,grid, m, n) # if y +", "< len(grid) and 0 <= j < len(grid[0]) and grid[i][j]", "range(len(grid)) for j in range(len(grid[i]))) # ------------------------------ # Summary: #", "j+1, j-1)) return 1 return 0 return sum(sink(i, j) for", "j) for i in range(len(grid)) for j in range(len(grid[i]))) #", "water and is formed by connecting adjacent lands horizontally or", "+ 1 < m and grid[x+1][y] == '1': # grid[x+1][y]", "------------------------------ class Solution(object): def numIslands(self, grid): \"\"\" :type grid: List[List[str]]", "# 11000 # 00000 # Answer: 1 # # Example", "understanding idea: # # class Solution(object): # def numIslands(self, grid):", "= '2' # self.island(i, j, grid, m, n) # return", "adjacent lands horizontally or vertically. You may assume all four", "== 0: return 0 # m = len(grid) # n", "grid[x+1][y] = '2' # self.island(x+1,y,grid, m, n) # if y", "3 # # Version: 1.0 # 11/13/17 by Jianfa #", "\"\"\" # if len(grid) == 0: return 0 # m", "range(len(grid[i]))) # ------------------------------ # Summary: # Copied from discussion. #", "(water), count the number of islands. 
An island is surrounded", "n) # if x -1 >=0 and grid[x-1][y] == '1':", "# grid[x-1][y] = '2' # self.island(x-1,y,grid, m, n) # if", "self.island(x-1,y,grid, m, n) # if y - 1 >= 0", "y + 1 < n and grid[x][y+1] == '1': #", "and grid[x][y+1] == '1': # grid[x][y+1] = '2' # self.island(x,y+1,grid,", "\"\"\" def sink(i, j): if 0 <= i < len(grid)", "j): if 0 <= i < len(grid) and 0 <=", "res = 0 # for i in range(m): # for", "four edges of the grid are all surrounded by water.", "i, i), (j, j, j+1, j-1)) return 1 return 0", "# if y - 1 >= 0 and grid[x][y-1] ==", "# Answer: 1 # # Example 2: # 11000 #", "# grid[x][y+1] = '2' # self.island(x,y+1,grid, m, n) # if", "return res # def island(self, x, y, grid, m, n):", "island is surrounded by water and is formed by connecting", "for j in range(len(grid[i]))) # ------------------------------ # Summary: # Copied", "-1 >=0 and grid[x-1][y] == '1': # grid[x-1][y] = '2'", "i < len(grid) and 0 <= j < len(grid[0]) and", "# if y + 1 < n and grid[x][y+1] ==", "j in range(n): # if grid[i][j] == '1': # res", "range(m): # for j in range(n): # if grid[i][j] ==", "edges of the grid are all surrounded by water. #", "\"\"\" # :type grid: List[List[str]] # :rtype: int # \"\"\"", "or vertically. You may assume all four edges of the", "def numIslands(self, grid): # \"\"\" # :type grid: List[List[str]] #", "idea: # # class Solution(object): # def numIslands(self, grid): #", "and grid[x][y-1] == '1': # grid[x][y-1] = '2' # self.island(x,y-1,grid,", "== \"1\": grid[i][j] = \"0\" map(sink, (i+1, i-1, i, i),", "int # \"\"\" # if len(grid) == 0: return 0", "# The following is another easy understanding idea: # #", "# if grid[i][j] == '1': # res += 1 #", "i in range(len(grid)) for j in range(len(grid[i]))) # ------------------------------ #", "# self.island(i, j, grid, m, n) # return res #", "Summary: # Copied from discussion. # The following is another", "# ------------------------------ # Summary: # Copied from discussion. 
# The", "class Solution(object): def numIslands(self, grid): \"\"\" :type grid: List[List[str]] :rtype:", "1 # grid[i][j] = '2' # self.island(i, j, grid, m,", "if 0 <= i < len(grid) and 0 <= j", "is surrounded by water and is formed by connecting adjacent", "grid[x][y-1] == '1': # grid[x][y-1] = '2' # self.island(x,y-1,grid, m,", "1.0 # 11/13/17 by Jianfa # ------------------------------ class Solution(object): def", "------------------------------ # 200. Number of Islands # # Description: #", "y, grid, m, n): # if x + 1 <", "for i in range(m): # for j in range(n): #", "# 00100 # 00011 # Answer: 3 # # Version:", "if y + 1 < n and grid[x][y+1] == '1':", "for i in range(len(grid)) for j in range(len(grid[i]))) # ------------------------------", "1 < n and grid[x][y+1] == '1': # grid[x][y+1] =", "is another easy understanding idea: # # class Solution(object): #", "0 # for i in range(m): # for j in", "j in range(len(grid[i]))) # ------------------------------ # Summary: # Copied from", "= len(grid) # n = len(grid[0]) # res = 0", "grid, m, n) # return res # def island(self, x,", "1: # 11110 # 11010 # 11000 # 00000 #", "res += 1 # grid[i][j] = '2' # self.island(i, j,", "# m = len(grid) # n = len(grid[0]) # res", "island(self, x, y, grid, m, n): # if x +", "= '2' # self.island(x,y+1,grid, m, n) # if x -1", "if x + 1 < m and grid[x+1][y] == '1':", "Number of Islands # # Description: # Given a 2d", "def numIslands(self, grid): \"\"\" :type grid: List[List[str]] :rtype: int \"\"\"", "# Example 2: # 11000 # 11000 # 00100 #", "11000 # 00100 # 00011 # Answer: 3 # #", ">=0 and grid[x-1][y] == '1': # grid[x-1][y] = '2' #", ":rtype: int # \"\"\" # if len(grid) == 0: return", ">= 0 and grid[x][y-1] == '1': # grid[x][y-1] = '2'", "Copied from discussion. 
# The following is another easy understanding", "sum(sink(i, j) for i in range(len(grid)) for j in range(len(grid[i])))", "Solution(object): def numIslands(self, grid): \"\"\" :type grid: List[List[str]] :rtype: int", "all four edges of the grid are all surrounded by", "# \"\"\" # if len(grid) == 0: return 0 #", "11010 # 11000 # 00000 # Answer: 1 # #", "surrounded by water. # # Example 1: # 11110 #", "map(sink, (i+1, i-1, i, i), (j, j, j+1, j-1)) return", "== '1': # grid[x+1][y] = '2' # self.island(x+1,y,grid, m, n)", "len(grid[0]) and grid[i][j] == \"1\": grid[i][j] = \"0\" map(sink, (i+1,", "'2' # self.island(x+1,y,grid, m, n) # if y + 1", "2d grid map of '1's (land) and '0's (water), count", "The following is another easy understanding idea: # # class", "and grid[i][j] == \"1\": grid[i][j] = \"0\" map(sink, (i+1, i-1,", "11000 # 00000 # Answer: 1 # # Example 2:", "all surrounded by water. # # Example 1: # 11110", "return 0 return sum(sink(i, j) for i in range(len(grid)) for", "== '1': # grid[x][y-1] = '2' # self.island(x,y-1,grid, m, n)", "x + 1 < m and grid[x+1][y] == '1': #", "and 0 <= j < len(grid[0]) and grid[i][j] == \"1\":", "grid map of '1's (land) and '0's (water), count the", "# res += 1 # grid[i][j] = '2' # self.island(i,", "islands. An island is surrounded by water and is formed", "grid): # \"\"\" # :type grid: List[List[str]] # :rtype: int", "int \"\"\" def sink(i, j): if 0 <= i <", "< len(grid[0]) and grid[i][j] == \"1\": grid[i][j] = \"0\" map(sink,", "numIslands(self, grid): # \"\"\" # :type grid: List[List[str]] # :rtype:", "x, y, grid, m, n): # if x + 1", "# 11010 # 11000 # 00000 # Answer: 1 #", "n) # if y - 1 >= 0 and grid[x][y-1]", "<= i < len(grid) and 0 <= j < len(grid[0])", "< n and grid[x][y+1] == '1': # grid[x][y+1] = '2'", "map of '1's (land) and '0's (water), count the number", "+ 1 < n and grid[x][y+1] == '1': # grid[x][y+1]", "and is formed by connecting adjacent lands horizontally or vertically.", "# Copied from discussion. 
# The following is another easy", "range(n): # if grid[i][j] == '1': # res += 1", "j, j+1, j-1)) return 1 return 0 return sum(sink(i, j)", "vertically. You may assume all four edges of the grid", "'1': # res += 1 # grid[i][j] = '2' #", "0 <= i < len(grid) and 0 <= j <" ]
[ "testInitialization(self): \"\"\"Tests the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self):", "the GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier',", "for the fseventsd record event formatter.\"\"\" from __future__ import unicode_literals", "event_formatter, expected_attribute_names) # TODO: add test for GetSources. if __name__", "-*- \"\"\"Tests for the fseventsd record event formatter.\"\"\" from __future__", "function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier', u'flag_values', u'hex_flags',", "event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\"", "u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) # TODO: add test for", "testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names =", "[ u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) # TODO:", "# -*- coding: utf-8 -*- \"\"\"Tests for the fseventsd record", "\"\"\"Tests for the fseventsd record event formatter.\"\"\" from __future__ import", "= [ u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) #", "= fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames(", "plaso.formatters import fseventsd from tests.formatters import 
test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests", "the fseventsd record event formatter.\"\"\" def testInitialization(self): \"\"\"Tests the initialization.\"\"\"", "GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier', u'flag_values',", "class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd record event formatter.\"\"\" def", "# TODO: add test for GetSources. if __name__ == '__main__':", "FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd record event formatter.\"\"\" def testInitialization(self):", "__future__ import unicode_literals import unittest from plaso.formatters import fseventsd from", "formatter.\"\"\" from __future__ import unicode_literals import unittest from plaso.formatters import", "import unicode_literals import unittest from plaso.formatters import fseventsd from tests.formatters", "unittest from plaso.formatters import fseventsd from tests.formatters import test_lib class", "for the fseventsd record event formatter.\"\"\" def testInitialization(self): \"\"\"Tests the", "initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames", "coding: utf-8 -*- \"\"\"Tests for the fseventsd record event formatter.\"\"\"", "def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names", "u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) # TODO: add test for GetSources.", "unicode_literals import unittest from plaso.formatters import fseventsd from tests.formatters import", "fseventsd record event formatter.\"\"\" def testInitialization(self): 
\"\"\"Tests the initialization.\"\"\" event_formatter", "expected_attribute_names = [ u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names)", "self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter()", "expected_attribute_names) # TODO: add test for GetSources. if __name__ ==", "from __future__ import unicode_literals import unittest from plaso.formatters import fseventsd", "def testInitialization(self): \"\"\"Tests the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def", "from plaso.formatters import fseventsd from tests.formatters import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase):", "record event formatter.\"\"\" from __future__ import unicode_literals import unittest from", "event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier', u'flag_values', u'hex_flags', u'path']", "utf-8 -*- \"\"\"Tests for the fseventsd record event formatter.\"\"\" from", "u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) # TODO: add test", "import unittest from plaso.formatters import fseventsd from tests.formatters import test_lib", "fseventsd record event formatter.\"\"\" from __future__ import unicode_literals import unittest", "python # -*- coding: utf-8 -*- \"\"\"Tests for the fseventsd", "\"\"\"Tests the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests", "\"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() expected_attribute_names = [", "\"\"\"Tests for the 
fseventsd record event formatter.\"\"\" def testInitialization(self): \"\"\"Tests", "record event formatter.\"\"\" def testInitialization(self): \"\"\"Tests the initialization.\"\"\" event_formatter =", "the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the", "= fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter", "fseventsd.FSEventsdEventFormatter() expected_attribute_names = [ u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter,", "-*- coding: utf-8 -*- \"\"\"Tests for the fseventsd record event", "the fseventsd record event formatter.\"\"\" from __future__ import unicode_literals import", "event formatter.\"\"\" from __future__ import unicode_literals import unittest from plaso.formatters", "fseventsd from tests.formatters import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the", "import fseventsd from tests.formatters import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for", "fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter) def testGetFormatStringAttributeNames(self): \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\" event_formatter =", "from tests.formatters import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd", "u'event_identifier', u'flag_values', u'hex_flags', u'path'] self._TestGetFormatStringAttributeNames( event_formatter, expected_attribute_names) # TODO: add", "tests.formatters import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd record", "self._TestGetFormatStringAttributeNames( event_formatter, 
expected_attribute_names) # TODO: add test for GetSources. if", "test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd record event formatter.\"\"\"", "event formatter.\"\"\" def testInitialization(self): \"\"\"Tests the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter()", "TODO: add test for GetSources. if __name__ == '__main__': unittest.main()", "import test_lib class FseventsdFormatterTest(test_lib.EventFormatterTestCase): \"\"\"Tests for the fseventsd record event", "formatter.\"\"\" def testInitialization(self): \"\"\"Tests the initialization.\"\"\" event_formatter = fseventsd.FSEventsdEventFormatter() self.assertIsNotNone(event_formatter)", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Tests for the" ]
[ "[conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f] random.shuffle(data)", "model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs',", "import DataLoader, LrPolicy from config import Config import argparse def", "validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0,", "change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer =", "from datetime import datetime import os import json import models", "= get_parser() args = parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" %", "argparse def get_parser(): parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return", "import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard from keras.models import load_model import random import numpy", "gc from keras.optimizers import Adam from imageio import imread from", "ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard from keras.models import load_model import random import numpy as", "from keras.optimizers import Adam from imageio import imread from datetime", "train(args=None): parser = 
get_parser() args = parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S')", "parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling", "data = [conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in", "trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize,", "import models from utils import DataLoader, LrPolicy from config import", "monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True)", "conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...')", "'.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting", "in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) 
trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...')", "from scipy import misc import gc from keras.optimizers import Adam", "from config import Config import argparse def get_parser(): parser =", "conf.loss) data = [conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath) if '.jpg'", "save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer", "conf.optimizer, loss = conf.loss) data = [conf.trainDataPath+\"/\"+f for f in", "thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize,", "write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss = conf.loss) data =", "model.compile(optimizer = conf.optimizer, loss = conf.loss) data = [conf.trainDataPath+\"/\"+f for", "= conf.optimizer, loss = conf.loss) data = [conf.trainDataPath+\"/\"+f for f", "random import numpy as np from scipy import misc import", "os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', 
monitor='val_loss', save_best_only=False, save_weights_only=True)", "= LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer,", "steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack] )", "import datetime import os import json import models from utils", "'-c', required=True) return parser def train(args=None): parser = get_parser() args", "LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss", "loss = conf.loss) data = [conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath)", "= argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return parser def train(args=None): parser", "import Adam from imageio import imread from datetime import datetime", "model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks =", "as np from scipy import misc import gc from keras.optimizers", "import gc from keras.optimizers import Adam from imageio import imread", "scipy import misc import gc from keras.optimizers import Adam from", "Adam from imageio import imread from datetime import datetime import", "import json import models from utils import DataLoader, LrPolicy from", 
"DataLoader, LrPolicy from config import Config import argparse def get_parser():", "def train(args=None): parser = get_parser() args = parser.parse_args(args) conf=Config() conf.load(args.configPath)", "validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack]", "LrPolicy from config import Config import argparse def get_parser(): parser", "= [conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]", "imread from datetime import datetime import os import json import", "initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack] ) if __name__ == \"__main__\":", "callbacks = [model_checkpoint, change_lr,tbCallBack] ) if __name__ == \"__main__\": train()", "get_parser(): parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return parser def", "validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack] ) if", "parser def train(args=None): parser = get_parser() args = parser.parse_args(args) conf=Config()", "def get_parser(): parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return parser", "from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard from keras.models import load_model import random", "utils import DataLoader, LrPolicy from config import Config import argparse", "(conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False,", "datetime import os import json 
import models from utils import", "conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint", "trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1,", "print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks", "random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(),", "histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss = conf.loss)", "from keras.models import load_model import random import numpy as np", "% (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint = 
ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss',", "import argparse def get_parser(): parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True)", "import imread from datetime import datetime import os import json", "tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss =", "required=True) return parser def train(args=None): parser = get_parser() args =", "f in os.listdir(conf.trainDataPath) if '.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:]", "import misc import gc from keras.optimizers import Adam from imageio", "= conf.loss) data = [conf.trainDataPath+\"/\"+f for f in os.listdir(conf.trainDataPath) if", "return parser def train(args=None): parser = get_parser() args = parser.parse_args(args)", "valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches,", "keras.models import load_model import random import numpy as np from", "import numpy as np from scipy import misc import gc", "import load_model import random import numpy as np from scipy", "for f in os.listdir(conf.trainDataPath) if '.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit)", "os import json import models from utils import DataLoader, LrPolicy", "import Config import argparse def get_parser(): parser = argparse.ArgumentParser('train') 
parser.add_argument('--configPath',", "np from scipy import misc import gc from keras.optimizers import", "= ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0,", "verbose=1, initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack] ) if __name__ ==", "model.fit_generator(generator=trainDataLoader.generator(), validation_data=validationDataLoader.generator(), steps_per_epoch=len(trainData)//conf.batchSize, validation_steps=len(valData)//conf.batchSize, epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks = [model_checkpoint,", "in os.listdir(conf.trainDataPath) if '.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr]", "get_parser() args = parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time)", "= parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json')", "misc import gc from keras.optimizers import Adam from imageio import", "print('Compiling model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr =", "write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss = conf.loss) data", "from imageio import imread from 
datetime import datetime import os", "trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5',", "datetime import datetime import os import json import models from", "f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue) print('Fitting model...') model.fit_generator(generator=trainDataLoader.generator(),", "model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel) model.compile(optimizer = conf.optimizer, loss = conf.loss) data = [conf.trainDataPath+\"/\"+f", "from utils import DataLoader, LrPolicy from config import Config import", "if '.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue) validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue)", "argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return parser def train(args=None): parser =", "model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)", "epochs=conf.epoches, verbose=1, initial_epoch=0, callbacks = [model_checkpoint, change_lr,tbCallBack] ) if __name__", "os.listdir(conf.trainDataPath) if '.jpg' in f] random.shuffle(data) thr=int(len(data)*conf.validationSplit) trainData=data[thr:] 
valData=data[:thr] trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue)", "time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString) conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint =", "args = parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\" % (conf.model,conf.optimizer,str(conf.lr),time) os.makedirs(conf.logPath+\"/\"+trainString)", "parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c', required=True) return parser def train(args=None):", "config import Config import argparse def get_parser(): parser = argparse.ArgumentParser('train')", "parser = get_parser() args = parser.parse_args(args) conf=Config() conf.load(args.configPath) time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S') trainString=\"%s_%s_%s_%s\"", "load_model import random import numpy as np from scipy import", "keras.optimizers import Adam from imageio import imread from datetime import", "json import models from utils import DataLoader, LrPolicy from config", "import random import numpy as np from scipy import misc", "models from utils import DataLoader, LrPolicy from config import Config", "numpy as np from scipy import misc import gc from", "conf.save(conf.logPath+\"/\"+trainString+'/config.json') print('Compiling model...') model_checkpoint = ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr", "import os import json import models from utils import DataLoader,", "save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, 
write_graph=True, write_images=True) model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel)", "Config import argparse def get_parser(): parser = argparse.ArgumentParser('train') parser.add_argument('--configPath', '-c',", "keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard from keras.models import load_model import random import", "ModelCheckpoint(conf.logPath+\"/\"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True) change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay) tbCallBack=TensorBoard(log_dir=conf.logPath+\"/\"+trainString+'/logs', histogram_freq=0, write_graph=True,", "parser.add_argument('--configPath', '-c', required=True) return parser def train(args=None): parser = get_parser()", "imageio import imread from datetime import datetime import os import" ]
[ "self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data): gradient_check.check_backward(", "import chainer from chainer import cuda from chainer import functions", "(numpy.prod((1,) + self.shape),) self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self,", "'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase): dtype = numpy.float32", "[(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase):", "})) class TestFlatten(unittest.TestCase): dtype = numpy.float32 def setUp(self): self.x =", "@testing.parameterize(*testing.product({ 'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], }))", "check_forward(self, x_data): x = chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape)", "check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self):", "= (numpy.prod((1,) + self.shape),) self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def", "numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self, x_data): x = chainer.Variable(x_data) y", "y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def", "1, self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,) + self.shape),) self.g = numpy.random.uniform(-1,", "chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data)", "self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): 
self.check_forward(self.x) @attr.gpu", "test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data,", "self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,) + self.shape),)", "class TestFlatten(unittest.TestCase): dtype = numpy.float32 def setUp(self): self.x = numpy.random.uniform(-1,", "y.data) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self,", "import functions from chainer import gradient_check from chainer import testing", "from chainer import gradient_check from chainer import testing from chainer.testing", "x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def test_backward_gpu(self):", "()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase): dtype =", "cuda from chainer import functions from chainer import gradient_check from", "from chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape':", "g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x),", "x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x,", "gradient_check from chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product({", "numpy import chainer from chainer import cuda from chainer import", "def check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data, dtype=numpy.float64) def", "functions from chainer import gradient_check from chainer import testing from", "from 
chainer import functions from chainer import gradient_check from chainer", "= numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self, x_data): x = chainer.Variable(x_data)", "import testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3, 4),", "4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase): dtype", "import cuda from chainer import functions from chainer import gradient_check", "def check_forward(self, x_data): x = chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape,", "self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x))", "import numpy import chainer from chainer import cuda from chainer", "[numpy.float16, numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase): dtype = numpy.float32 def", "self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self):", "test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g)) testing.run_module(__name__, __file__)", "dtype = numpy.float32 def setUp(self): self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)", "testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3, 4), ()],", "from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3, 4), ()], 'dtype':", "numpy.float32, numpy.float64], })) class TestFlatten(unittest.TestCase): dtype = numpy.float32 def setUp(self):", "self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data, 
dtype=numpy.float64)", "functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): self.check_forward(self.x)", "'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class", "x_data): x = chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype,", "numpy.float64], })) class TestFlatten(unittest.TestCase): dtype = numpy.float32 def setUp(self): self.x", "test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data):", "from chainer import cuda from chainer import functions from chainer", "1, self.g_shape).astype(self.dtype) def check_forward(self, x_data): x = chainer.Variable(x_data) y =", "chainer from chainer import cuda from chainer import functions from", "functions.Flatten(), x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def", "self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def", "def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(), x_data,", "chainer import functions from chainer import gradient_check from chainer import", "numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,) + self.shape),) self.g =", "import attr @testing.parameterize(*testing.product({ 'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32,", "numpy.float32 def setUp(self): self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape =", "attr @testing.parameterize(*testing.product({ 'shape': [(3, 4), ()], 'dtype': 
[numpy.float16, numpy.float32, numpy.float64],", "+ self.shape),) self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self, x_data):", "def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g)) testing.run_module(__name__,", "= functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self):", "g_data): gradient_check.check_backward( functions.Flatten(), x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g)", "self.shape),) self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self, x_data): x", "TestFlatten(unittest.TestCase): dtype = numpy.float32 def setUp(self): self.x = numpy.random.uniform(-1, 1,", "x = chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype)", "import gradient_check from chainer import testing from chainer.testing import attr", "chainer import gradient_check from chainer import testing from chainer.testing import", "import unittest import numpy import chainer from chainer import cuda", "chainer import cuda from chainer import functions from chainer import", "dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))", "@attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, g_data): gradient_check.check_backward( functions.Flatten(),", "self.g_shape = (numpy.prod((1,) + self.shape),) self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)", "setUp(self): self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape = 
(numpy.prod((1,) +", "= numpy.float32 def setUp(self): self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape", "def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data,", "unittest import numpy import chainer from chainer import cuda from", "= numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,) + self.shape),) self.g", "chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3,", "self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype) def check_forward(self, x_data): x =", "self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,) + self.shape),) self.g = numpy.random.uniform(-1, 1,", "gradient_check.check_backward( functions.Flatten(), x_data, g_data, dtype=numpy.float64) def test_backward_cpu(self): self.check_backward(self.x, self.g) @attr.gpu", "= chainer.Variable(x_data) y = functions.flatten(x) self.assertEqual(y.shape, self.g_shape) self.assertEqual(y.dtype, self.dtype) testing.assert_allclose(self.x.flatten(),", "self.g_shape).astype(self.dtype) def check_forward(self, x_data): x = chainer.Variable(x_data) y = functions.flatten(x)", "testing.assert_allclose(self.x.flatten(), y.data) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def", "def setUp(self): self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.g_shape = (numpy.prod((1,)", "chainer.testing import attr @testing.parameterize(*testing.product({ 'shape': [(3, 4), ()], 'dtype': [numpy.float16," ]
[ "2020-03-24 09:59 from django.db import migrations, models import django.db.models.deletion class", "'0018_photo_file'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True,", "serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True,", "by Django 3.0.3 on 2020-03-24 09:59 from django.db import migrations,", "[ ('exercises', '0018_photo_file'), ] operations = [ migrations.CreateModel( name='Category', fields=[", "[ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)),", "('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={ 'abstract': False, },", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated',", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={ 'abstract': False, },", "primary_key=True, serialize=False, verbose_name='ID')), ('created', 
models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description',", "('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ],", "class Migration(migrations.Migration): initial = True dependencies = [ ('exercises', '0018_photo_file'),", "options={ 'abstract': False, }, ), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True,", "serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)),", "Generated by Django 3.0.3 on 2020-03-24 09:59 from django.db import", "# Generated by Django 3.0.3 on 2020-03-24 09:59 from django.db", "models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE,", "verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)),", "('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos',", "Migration(migrations.Migration): initial = True dependencies = [ ('exercises', '0018_photo_file'), ]", "), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 
('created',", "operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories',", "3.0.3 on 2020-03-24 09:59 from django.db import migrations, models import", "models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={ 'abstract': False, }, ),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)),", "models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)), ('category',", "('exercises', '0018_photo_file'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id',", "name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated',", "verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises',", "], options={ 'abstract': False, }, ), migrations.CreateModel( 
name='Photo', fields=[ ('id',", "}, ), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption', models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')),", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "09:59 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('exercises',", "to='exercises.Exercise')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Photo', fields=[", "('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={ 'abstract':", "('image_caption', models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={ 'abstract':", "models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url', models.URLField()),", "primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', 
models.DateTimeField(auto_now=True)), ('image_url', models.URLField()), ('image_caption',", "] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "Django 3.0.3 on 2020-03-24 09:59 from django.db import migrations, models", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name',", "migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)),", "True dependencies = [ ('exercises', '0018_photo_file'), ] operations = [", "models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={ 'abstract': False,", "= [ ('exercises', '0018_photo_file'), ] operations = [ migrations.CreateModel( name='Category',", "models.CharField(blank=True, max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={ 'abstract': False,", "related_name='categories', to='exercises.Exercise')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Photo',", "False, }, ), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "= True dependencies = [ ('exercises', '0018_photo_file'), ] operations =", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "= [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.URLField()), ('image_caption', models.CharField(blank=True, 
max_length=80)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "on 2020-03-24 09:59 from django.db import migrations, models import django.db.models.deletion", "('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={ 'abstract': False, }, ),", "models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={", "('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ],", "dependencies = [ ('exercises', '0018_photo_file'), ] operations = [ migrations.CreateModel(", "initial = True dependencies = [ ('exercises', '0018_photo_file'), ] operations", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('image_url',", "on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')), ], options={ 'abstract': False, }, ), migrations.CreateModel(", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')), ], options={ 'abstract': False, }, ), ]", "migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)),", "('created', 
models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=80)), ('description', models.TextField(blank=True)), ('exercises', models.ForeignKey(blank=True,", "'abstract': False, }, ), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True," ]
[ "self._name = name or type(env.task).__name__ self._env = env self._viewer =", "def step(self, action): time_step = self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward,", "name=None): self._name = name or type(env.task).__name__ self._env = env self._viewer", "name or type(env.task).__name__ self._env = env self._viewer = None @classmethod", "<https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self, env, name=None): self._name = name or", "self._viewer.render() return None elif mode == 'rgb_array': return self._env.physics.render() else:", "self._viewer = None self._env = None def _flat_shape(self, observation): return", "suite from dm_control.rl.control import flatten_observation from dm_env import StepType import", "import DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\"", "np from metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class", "= 'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None", "Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self, env, name=None): self._name", "= self._env.action_spec() if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum", "mode='human'): # pylint: disable=inconsistent-return-statements if mode == 'human': if not", "self._env = None def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for k,", "env self._viewer = None @classmethod def from_suite(cls, domain_name, task_name): return", "action_spec = self._env.action_spec() if (len(action_spec.shape) == 1) and (-np.inf in", "@property def action_space(self): action_spec = self._env.action_spec() if (len(action_spec.shape) == 1)", "observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return 
gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def", "= None @classmethod def from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name, task_name),", "task_name)) def step(self, action): time_step = self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'],", "def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32)", "from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding for `dm_control", "else: raise NotImplementedError def close(self): if self._viewer: self._viewer.close() self._env.close() self._viewer", "'human': if not self._viewer: title = 'dm_control {}'.format(self._name) self._viewer =", "StepType import gym import numpy as np from metarl.envs import", "from metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env):", "== 'rgb_array': return self._env.physics.render() else: raise NotImplementedError def close(self): if", "= None self._env = None def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape))", "in observation.items()) @property def action_space(self): action_spec = self._env.action_spec() if (len(action_spec.shape)", "@property def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim],", "self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation) def", "self._env.close() self._viewer = None self._env = None def _flat_shape(self, observation):", "(-np.inf in action_spec.minimum or np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else:", "reset(self): time_step = 
self._env.reset() return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): #", "flatten_observation from dm_env import StepType import gym import numpy as", "dm_env import StepType import gym import numpy as np from", "return None elif mode == 'rgb_array': return self._env.physics.render() else: raise", "time_step = self._env.reset() return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): # pylint:", "metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env): \"\"\"", "# pylint: disable=inconsistent-return-statements if mode == 'human': if not self._viewer:", "or type(env.task).__name__ self._env = env self._viewer = None @classmethod def", "DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None elif mode == 'rgb_array': return", "action_space(self): action_spec = self._env.action_spec() if (len(action_spec.shape) == 1) and (-np.inf", "mode == 'rgb_array': return self._env.physics.render() else: raise NotImplementedError def close(self):", "close(self): if self._viewer: self._viewer.close() self._env.close() self._viewer = None self._env =", "observation.items()) @property def action_space(self): action_spec = self._env.action_spec() if (len(action_spec.shape) ==", "or np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum,", "mode == 'human': if not self._viewer: title = 'dm_control {}'.format(self._name)", "`dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self, env, name=None): self._name = name", "pylint: disable=inconsistent-return-statements if mode == 'human': if not self._viewer: title", "in action_spec.minimum or np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return", "cls(suite.load(domain_name, task_name), 
name='{}.{}'.format(domain_name, task_name)) def step(self, action): time_step = self._env.step(action)", "disable=inconsistent-return-statements if mode == 'human': if not self._viewer: title =", "_flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for k, v in observation.items()) @property", "def __getstate__(self): d = self.__dict__.copy() d['_viewer'] = None return d", "None elif mode == 'rgb_array': return self._env.physics.render() else: raise NotImplementedError", "time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation) def reset(self): time_step = self._env.reset()", "{}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None elif mode", "from dm_env import StepType import gym import numpy as np", "numpy as np from metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer import", "DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def", "return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec())", "self._viewer: self._viewer.close() self._env.close() self._viewer = None self._env = None def", "self._env.action_spec() if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or", "else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property def observation_space(self): flat_dim =", "elif mode == 'rgb_array': return self._env.physics.render() else: raise NotImplementedError def", "action_spec.maximum, dtype=np.float32) @property def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf,", "**time_step.observation) def reset(self): time_step = self._env.reset() return flatten_observation(time_step.observation)['observations'] def 
render(self,", "import gym import numpy as np from metarl.envs import Step", "step(self, action): time_step = self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type", "from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name)) def step(self,", "flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation) def reset(self): time_step =", "def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for k, v in observation.items())", "self._env = env self._viewer = None @classmethod def from_suite(cls, domain_name,", "self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self): d =", "= self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation)", "NotImplementedError def close(self): if self._viewer: self._viewer.close() self._env.close() self._viewer = None", "dm_control.rl.control import flatten_observation from dm_env import StepType import gym import", "= self._env.reset() return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): # pylint: disable=inconsistent-return-statements", "return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation) def reset(self):", "def from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name)) def", "(len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or np.inf in", "DmControlEnv(gym.Env): \"\"\" Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def 
__init__(self, env,", "StepType.LAST, **time_step.observation) def reset(self): time_step = self._env.reset() return flatten_observation(time_step.observation)['observations'] def", "return np.sum(int(np.prod(v.shape)) for k, v in observation.items()) @property def action_space(self):", "type(env.task).__name__ self._env = env self._viewer = None @classmethod def from_suite(cls,", "render(self, mode='human'): # pylint: disable=inconsistent-return-statements if mode == 'human': if", "name='{}.{}'.format(domain_name, task_name)) def step(self, action): time_step = self._env.step(action) return Step(", "def reset(self): time_step = self._env.reset() return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'):", "= name or type(env.task).__name__ self._env = env self._viewer = None", "flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): # pylint: disable=inconsistent-return-statements if mode ==", "__init__(self, env, name=None): self._name = name or type(env.task).__name__ self._env =", "def close(self): if self._viewer: self._viewer.close() self._env.close() self._viewer = None self._env", "import flatten_observation from dm_env import StepType import gym import numpy", "@classmethod def from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name))", "task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name)) def step(self, action): time_step", "self._viewer: title = 'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render()", "raise NotImplementedError def close(self): if self._viewer: self._viewer.close() self._env.close() self._viewer =", "dtype=np.float32) @property def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf,", 
"for k, v in observation.items()) @property def action_space(self): action_spec =", "def action_space(self): action_spec = self._env.action_spec() if (len(action_spec.shape) == 1) and", "self._viewer.launch(self._env) self._viewer.render() return None elif mode == 'rgb_array': return self._env.physics.render()", "1) and (-np.inf in action_spec.minimum or np.inf in action_spec.maximum): return", "self._viewer.close() self._env.close() self._viewer = None self._env = None def _flat_shape(self,", "np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32)", "not self._viewer: title = 'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env)", "np.sum(int(np.prod(v.shape)) for k, v in observation.items()) @property def action_space(self): action_spec", "import Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding", "'rgb_array': return self._env.physics.render() else: raise NotImplementedError def close(self): if self._viewer:", "k, v in observation.items()) @property def action_space(self): action_spec = self._env.action_spec()", "time_step = self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST,", "observation): return np.sum(int(np.prod(v.shape)) for k, v in observation.items()) @property def", "dtype=np.float32) def __getstate__(self): d = self.__dict__.copy() d['_viewer'] = None return", "from dm_control import suite from dm_control.rl.control import flatten_observation from dm_env", "if mode == 'human': if not self._viewer: title = 'dm_control", "metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_", "\"\"\" Binding for 
`dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self, env, name=None):", "self._viewer = None @classmethod def from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name,", "Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type == StepType.LAST, **time_step.observation) def reset(self): time_step", "time_step.step_type == StepType.LAST, **time_step.observation) def reset(self): time_step = self._env.reset() return", "<filename>src/metarl/envs/dm_control/dm_control_env.py<gh_stars>1-10 from dm_control import suite from dm_control.rl.control import flatten_observation from", "dm_control import suite from dm_control.rl.control import flatten_observation from dm_env import", "class DmControlEnv(gym.Env): \"\"\" Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self,", "domain_name, task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name)) def step(self, action):", "task_name), name='{}.{}'.format(domain_name, task_name)) def step(self, action): time_step = self._env.step(action) return", "gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property def observation_space(self): flat_dim = self._flat_shape(self._env.observation_spec()) return", "if self._viewer: self._viewer.close() self._env.close() self._viewer = None self._env = None", "gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self): d = self.__dict__.copy() d['_viewer']", "= env self._viewer = None @classmethod def from_suite(cls, domain_name, task_name):", "Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer class DmControlEnv(gym.Env): \"\"\" Binding for", "def render(self, mode='human'): # pylint: disable=inconsistent-return-statements if mode == 'human':", "for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_ \"\"\" def __init__(self, env, 
name=None): self._name =", "return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): # pylint: disable=inconsistent-return-statements if mode", "self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None elif mode ==", "None def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for k, v in", "return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name, task_name)) def step(self, action): time_step =", "from dm_control.rl.control import flatten_observation from dm_env import StepType import gym", "action): time_step = self._env.step(action) return Step( flatten_observation(time_step.observation)['observations'], time_step.reward, time_step.step_type ==", "title = 'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return", "action_spec.minimum or np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum,", "return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self): d = self.__dict__.copy()", "if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or np.inf", "\"\"\" def __init__(self, env, name=None): self._name = name or type(env.task).__name__", "gym import numpy as np from metarl.envs import Step from", "return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property def observation_space(self):", "shape=[flat_dim], dtype=np.float32) def __getstate__(self): d = self.__dict__.copy() d['_viewer'] = None", "flat_dim = self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self):", "== StepType.LAST, **time_step.observation) def reset(self): 
time_step = self._env.reset() return flatten_observation(time_step.observation)['observations']", "import StepType import gym import numpy as np from metarl.envs", "return self._env.physics.render() else: raise NotImplementedError def close(self): if self._viewer: self._viewer.close()", "import suite from dm_control.rl.control import flatten_observation from dm_env import StepType", "env, name=None): self._name = name or type(env.task).__name__ self._env = env", "and (-np.inf in action_spec.minimum or np.inf in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape))", "self._env.reset() return flatten_observation(time_step.observation)['observations'] def render(self, mode='human'): # pylint: disable=inconsistent-return-statements if", "'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None elif", "= DmControlViewer(title=title) self._viewer.launch(self._env) self._viewer.render() return None elif mode == 'rgb_array':", "= None def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for k, v", "None self._env = None def _flat_shape(self, observation): return np.sum(int(np.prod(v.shape)) for", "= self._flat_shape(self._env.observation_spec()) return gym.spaces.Box(low=-np.inf, high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self): d", "def __init__(self, env, name=None): self._name = name or type(env.task).__name__ self._env", "v in observation.items()) @property def action_space(self): action_spec = self._env.action_spec() if", "self._env.physics.render() else: raise NotImplementedError def close(self): if self._viewer: self._viewer.close() self._env.close()", "high=np.inf, shape=[flat_dim], dtype=np.float32) def __getstate__(self): d = self.__dict__.copy() d['_viewer'] =", "action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, 
dtype=np.float32) @property def", "import numpy as np from metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer", "gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property def observation_space(self): flat_dim", "as np from metarl.envs import Step from metarl.envs.dm_control.dm_control_viewer import DmControlViewer", "if not self._viewer: title = 'dm_control {}'.format(self._name) self._viewer = DmControlViewer(title=title)", "== 1) and (-np.inf in action_spec.minimum or np.inf in action_spec.maximum):", "None @classmethod def from_suite(cls, domain_name, task_name): return cls(suite.load(domain_name, task_name), name='{}.{}'.format(domain_name,", "in action_spec.maximum): return gym.spaces.Discrete(np.prod(action_spec.shape)) else: return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32) @property", "== 'human': if not self._viewer: title = 'dm_control {}'.format(self._name) self._viewer" ]
[ "def snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None, ): tags", "None def snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None, ):", "required_resource_keys.add('snowflake') if callable(name): fn = name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn,", "tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind'] = 'snowflake'", "_wrap(fn): return create_lakehouse_table_def( name=name if name is not None else", "check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind'] = 'snowflake' required_resource_keys =", "required_resource_keys=required_resource_keys, ) def _wrap(fn): return create_lakehouse_table_def( name=name if name is", "from dagster import check from .house import Lakehouse from .table", "dagster import check from .house import Lakehouse from .table import", "table_handle, _dest_metadata): return None def materialize(self, context, table_type, table_metadata, value):", "fn = name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, )", "SnowflakeLakehouse(Lakehouse): def __init__(self): pass def hydrate(self, _context, _table_type, _table_metadata, table_handle,", "def _wrap(fn): return create_lakehouse_table_def( name=name if name is not None", "callable(name): fn = name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys,", "from .house import Lakehouse from .table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse):", "return create_lakehouse_table_def( name=name if name is not None else fn.__name__,", "from .table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self): pass def", 
"name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn): return create_lakehouse_table_def( name=name", "required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name): fn = name", "return None def materialize(self, context, table_type, table_metadata, value): return None,", "hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata): return None def materialize(self,", "description=None, ): tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind']", "'snowflake_table' tags['kind'] = 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if", "fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags, description=description, required_resource_keys=required_resource_keys, ) return _wrap", "name is not None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags,", "create_lakehouse_table_def( name=name if name is not None else fn.__name__, lakehouse_fn=fn,", "lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn): return create_lakehouse_table_def( name=name if", "context, table_type, table_metadata, value): return None, None def snowflake_table( name=None,", "None, None def snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None,", "): tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind'] =", "create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn): return create_lakehouse_table_def(", "tags['lakehouse_type'] = 'snowflake_table' 
tags['kind'] = 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys')", "table_metadata, value): return None, None def snowflake_table( name=None, input_tables=None, other_input_defs=None,", ".table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self): pass def hydrate(self,", "check from .house import Lakehouse from .table import create_lakehouse_table_def class", "None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags, description=description, required_resource_keys=required_resource_keys, )", "_table_type, _table_metadata, table_handle, _dest_metadata): return None def materialize(self, context, table_type,", "_dest_metadata): return None def materialize(self, context, table_type, table_metadata, value): return", "tags=None, required_resource_keys=None, description=None, ): tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] =", "import Lakehouse from .table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self):", "import check from .house import Lakehouse from .table import create_lakehouse_table_def", "pass def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata): return None", "if callable(name): fn = name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[],", "else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags, description=description, required_resource_keys=required_resource_keys, ) return", "return None, None def snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None,", "'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind'] = 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys,", "other_input_defs=None, tags=None, required_resource_keys=None, 
description=None, ): tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type']", "= 'snowflake_table' tags['kind'] = 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake')", "is not None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags, description=description,", "create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self): pass def hydrate(self, _context, _table_type,", "input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn): return create_lakehouse_table_def( name=name if name", "table_type, table_metadata, value): return None, None def snowflake_table( name=None, input_tables=None,", "value): return None, None def snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None,", "_context, _table_type, _table_metadata, table_handle, _dest_metadata): return None def materialize(self, context,", "= 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name): fn", "= name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def", "tags['kind'] = 'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name):", "= check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table' tags['kind'] = 'snowflake' required_resource_keys", "= check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name): fn = name return", "materialize(self, context, table_type, table_metadata, value): return None, None def snowflake_table(", "__init__(self): pass def hydrate(self, _context, _table_type, 
_table_metadata, table_handle, _dest_metadata): return", ".house import Lakehouse from .table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def", "check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name): fn = name return create_lakehouse_table_def(", "name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None, ): tags = check.opt_dict_param(tags,", "name=name if name is not None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables,", "def __init__(self): pass def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata):", "def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata): return None def", "def materialize(self, context, table_type, table_metadata, value): return None, None def", "return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn): return", "'snowflake' required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys') required_resource_keys.add('snowflake') if callable(name): fn =", "not None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs, tags=tags, description=description, required_resource_keys=required_resource_keys,", "required_resource_keys=None, description=None, ): tags = check.opt_dict_param(tags, 'tags') tags['lakehouse_type'] = 'snowflake_table'", "input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None, ): tags = check.opt_dict_param(tags, 'tags')", "None def materialize(self, context, table_type, table_metadata, value): return None, None", "Lakehouse from .table import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self): pass", "'required_resource_keys') 
required_resource_keys.add('snowflake') if callable(name): fn = name return create_lakehouse_table_def( name=fn.__name__,", "if name is not None else fn.__name__, lakehouse_fn=fn, input_tables=input_tables, other_input_defs=other_input_defs,", "import create_lakehouse_table_def class SnowflakeLakehouse(Lakehouse): def __init__(self): pass def hydrate(self, _context,", "class SnowflakeLakehouse(Lakehouse): def __init__(self): pass def hydrate(self, _context, _table_type, _table_metadata,", "_table_metadata, table_handle, _dest_metadata): return None def materialize(self, context, table_type, table_metadata,", "name return create_lakehouse_table_def( name=fn.__name__, lakehouse_fn=fn, input_tables=[], required_resource_keys=required_resource_keys, ) def _wrap(fn):", ") def _wrap(fn): return create_lakehouse_table_def( name=name if name is not", "snowflake_table( name=None, input_tables=None, other_input_defs=None, tags=None, required_resource_keys=None, description=None, ): tags =" ]
[ "allowed to have any of the \"default\" shaders present in", "@classmethod def get_invalid(cls, instance): invalid = set() for node in", "in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection to \" \"'{}': {}\".format(s,", "in the look must have any of default shaders applied.", "shape nodes in the look must have any of default", "families = ['look'] hosts = ['maya'] label = 'Look No", "= pype.api.ValidateContentsOrder + 0.01 families = ['look'] hosts = ['maya']", "\"\"\" order = pype.api.ValidateContentsOrder + 0.01 families = ['look'] hosts", "for any disallowed connections on *all* nodes if any(s in", "shaders applied. \"\"\" order = pype.api.ValidateContentsOrder + 0.01 families =", "s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection to \" \"'{}':", "lambert1 - initialShadingGroup - initialParticleSE - particleCloud1 If any of", "look is not allowed to have any of the \"default\"", "a scene as they can introduce problems when referenced (overriding", "shading engine connections shaders = cmds.listConnections(node, type=\"shadingEngine\") or [] #", "to a default shader. This checks whether the look has", "shaders). To fix this no shape nodes in the look", "No Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\",", "# Check for any disallowed connections on *all* nodes if", "problems when referenced (overriding local scene shaders). To fix this", "an error. A look is not allowed to have any", "they can introduce problems when referenced (overriding local scene shaders).", "any members of: - lambert1 - initialShadingGroup - initialParticleSE -", "of those is present it will raise an error. 
A", "no shape nodes in the look must have any of", "nodes in the look must have any of default shaders", "instance: # Get shading engine connections shaders = cmds.listConnections(node, type=\"shadingEngine\")", "shaders present in a scene as they can introduce problems", "import cmds import pyblish.api import pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin):", "node in instance: # Get shading engine connections shaders =", "Check for any disallowed connections on *all* nodes if any(s", "= cmds.listConnections(node, type=\"shadingEngine\") or [] # Check for any disallowed", "have any of the \"default\" shaders present in a scene", "must have any of default shaders applied. \"\"\" order =", "raise RuntimeError(\"Invalid node relationships found: \" \"{0}\".format(invalid)) @classmethod def get_invalid(cls,", "a default shader. This checks whether the look has any", "DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self, instance): \"\"\"Process", "nodes in the instance\"\"\" invalid = self.get_invalid(instance) if invalid: raise", "nodes if any(s in cls.DEFAULT_SHADERS for s in shaders): #", "has a connection to a default shader. This checks whether", "RuntimeError(\"Invalid node relationships found: \" \"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance):", "\"default\" shaders present in a scene as they can introduce", "is present it will raise an error. 
A look is", "label = 'Look No Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS", "= 'Look No Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS =", "any disallowed connections on *all* nodes if any(s in cls.DEFAULT_SHADERS", "s in shaders): # Explicitly log each individual \"wrong\" connection.", "is not allowed to have any of the \"default\" shaders", "in instance: # Get shading engine connections shaders = cmds.listConnections(node,", "the \"default\" shaders present in a scene as they can", "instance): \"\"\"Process all the nodes in the instance\"\"\" invalid =", "cmds.listConnections(node, type=\"shadingEngine\") or [] # Check for any disallowed connections", "= ['maya'] label = 'Look No Default Shaders' actions =", "= set() for node in instance: # Get shading engine", "Explicitly log each individual \"wrong\" connection. for s in shaders:", "Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\",", "node has a connection to a default shader. This checks", "can introduce problems when referenced (overriding local scene shaders). To", "cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection to \" \"'{}': {}\".format(s, node))", "type=\"shadingEngine\") or [] # Check for any disallowed connections on", "# Explicitly log each individual \"wrong\" connection. for s in", "default shader. This checks whether the look has any members", "individual \"wrong\" connection. for s in shaders: if s in", "def process(self, instance): \"\"\"Process all the nodes in the instance\"\"\"", "cls.log.error(\"Node has unallowed connection to \" \"'{}': {}\".format(s, node)) invalid.add(node)", "in the instance\"\"\" invalid = self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid", "applied. 
\"\"\" order = pype.api.ValidateContentsOrder + 0.01 families = ['look']", "introduce problems when referenced (overriding local scene shaders). To fix", "particleCloud1 If any of those is present it will raise", "have any of default shaders applied. \"\"\" order = pype.api.ValidateContentsOrder", "{\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self, instance): \"\"\"Process all the", "\"initialParticleSE\", \"particleCloud1\"} def process(self, instance): \"\"\"Process all the nodes in", "- initialShadingGroup - initialParticleSE - particleCloud1 If any of those", "\"wrong\" connection. for s in shaders: if s in cls.DEFAULT_SHADERS:", "import pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node", "disallowed connections on *all* nodes if any(s in cls.DEFAULT_SHADERS for", "pyblish.api import pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any", "actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def", "connection. for s in shaders: if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node", "log each individual \"wrong\" connection. for s in shaders: if", "connection to a default shader. This checks whether the look", "\"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self, instance): \"\"\"Process all the nodes", "the look has any members of: - lambert1 - initialShadingGroup", "To fix this no shape nodes in the look must", "local scene shaders). To fix this no shape nodes in", "present it will raise an error. A look is not", "of: - lambert1 - initialShadingGroup - initialParticleSE - particleCloud1 If", "any node has a connection to a default shader. 
This", "cmds import pyblish.api import pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate", "has unallowed connection to \" \"'{}': {}\".format(s, node)) invalid.add(node) return", "this no shape nodes in the look must have any", "self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid node relationships found: \" \"{0}\".format(invalid))", "default shaders applied. \"\"\" order = pype.api.ValidateContentsOrder + 0.01 families", "cls.DEFAULT_SHADERS for s in shaders): # Explicitly log each individual", "for s in shaders: if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has", "- particleCloud1 If any of those is present it will", "will raise an error. A look is not allowed to", "class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node has a connection to", "shader. This checks whether the look has any members of:", "pype.api.ValidateContentsOrder + 0.01 families = ['look'] hosts = ['maya'] label", "\"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance): invalid = set() for node", "(overriding local scene shaders). To fix this no shape nodes", "['maya'] label = 'Look No Default Shaders' actions = [pype.maya.action.SelectInvalidAction]", "\"\"\"Validate if any node has a connection to a default", "of default shaders applied. \"\"\" order = pype.api.ValidateContentsOrder + 0.01", "each individual \"wrong\" connection. for s in shaders: if s", "= {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self, instance): \"\"\"Process all", "s in shaders: if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed", "['look'] hosts = ['maya'] label = 'Look No Default Shaders'", "connections shaders = cmds.listConnections(node, type=\"shadingEngine\") or [] # Check for", "def get_invalid(cls, instance): invalid = set() for node in instance:", "those is present it will raise an error. 
A look", "when referenced (overriding local scene shaders). To fix this no", "set() for node in instance: # Get shading engine connections", "of the \"default\" shaders present in a scene as they", "fix this no shape nodes in the look must have", "engine connections shaders = cmds.listConnections(node, type=\"shadingEngine\") or [] # Check", "import pyblish.api import pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if", "Get shading engine connections shaders = cmds.listConnections(node, type=\"shadingEngine\") or []", "process(self, instance): \"\"\"Process all the nodes in the instance\"\"\" invalid", "Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"}", "node relationships found: \" \"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance): invalid", "relationships found: \" \"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance): invalid =", "to have any of the \"default\" shaders present in a", "shaders): # Explicitly log each individual \"wrong\" connection. for s", "scene shaders). 
To fix this no shape nodes in the", "instance): invalid = set() for node in instance: # Get", "if any node has a connection to a default shader.", "as they can introduce problems when referenced (overriding local scene", "invalid = self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid node relationships found:", "if invalid: raise RuntimeError(\"Invalid node relationships found: \" \"{0}\".format(invalid)) @classmethod", "order = pype.api.ValidateContentsOrder + 0.01 families = ['look'] hosts =", "invalid: raise RuntimeError(\"Invalid node relationships found: \" \"{0}\".format(invalid)) @classmethod def", "pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node has a connection", "get_invalid(cls, instance): invalid = set() for node in instance: #", "initialParticleSE - particleCloud1 If any of those is present it", "+ 0.01 families = ['look'] hosts = ['maya'] label =", "on *all* nodes if any(s in cls.DEFAULT_SHADERS for s in", "has any members of: - lambert1 - initialShadingGroup - initialParticleSE", "in cls.DEFAULT_SHADERS for s in shaders): # Explicitly log each", "any of the \"default\" shaders present in a scene as", "in a scene as they can introduce problems when referenced", "not allowed to have any of the \"default\" shaders present", "instance\"\"\" invalid = self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid node relationships", "scene as they can introduce problems when referenced (overriding local", "# Get shading engine connections shaders = cmds.listConnections(node, type=\"shadingEngine\") or", "[] # Check for any disallowed connections on *all* nodes", "This checks whether the look has any members of: -", "members of: - lambert1 - initialShadingGroup - initialParticleSE - particleCloud1", "or [] # Check for any disallowed connections on *all*", "shaders: if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection to", "whether the look has any members 
of: - lambert1 -", "look has any members of: - lambert1 - initialShadingGroup -", "checks whether the look has any members of: - lambert1", "\" \"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance): invalid = set() for", "the nodes in the instance\"\"\" invalid = self.get_invalid(instance) if invalid:", "ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node has a connection to a", "<reponame>tokejepsen/pype from maya import cmds import pyblish.api import pype.api import", "maya import cmds import pyblish.api import pype.api import pype.maya.action class", "it will raise an error. A look is not allowed", "= self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid node relationships found: \"", "in shaders): # Explicitly log each individual \"wrong\" connection. for", "if any(s in cls.DEFAULT_SHADERS for s in shaders): # Explicitly", "the look must have any of default shaders applied. \"\"\"", "any(s in cls.DEFAULT_SHADERS for s in shaders): # Explicitly log", "look must have any of default shaders applied. \"\"\" order", "present in a scene as they can introduce problems when", "a connection to a default shader. This checks whether the", "\"particleCloud1\"} def process(self, instance): \"\"\"Process all the nodes in the", "error. 
A look is not allowed to have any of", "invalid = set() for node in instance: # Get shading", "*all* nodes if any(s in cls.DEFAULT_SHADERS for s in shaders):", "If any of those is present it will raise an", "A look is not allowed to have any of the", "found: \" \"{0}\".format(invalid)) @classmethod def get_invalid(cls, instance): invalid = set()", "- lambert1 - initialShadingGroup - initialParticleSE - particleCloud1 If any", "any of those is present it will raise an error.", "if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection to \"", "the instance\"\"\" invalid = self.get_invalid(instance) if invalid: raise RuntimeError(\"Invalid node", "0.01 families = ['look'] hosts = ['maya'] label = 'Look", "pype.api import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node has", "shaders = cmds.listConnections(node, type=\"shadingEngine\") or [] # Check for any", "for s in shaders): # Explicitly log each individual \"wrong\"", "in shaders: if s in cls.DEFAULT_SHADERS: cls.log.error(\"Node has unallowed connection", "import pype.maya.action class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): \"\"\"Validate if any node has a", "for node in instance: # Get shading engine connections shaders", "connections on *all* nodes if any(s in cls.DEFAULT_SHADERS for s", "'Look No Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\",", "all the nodes in the instance\"\"\" invalid = self.get_invalid(instance) if", "hosts = ['maya'] label = 'Look No Default Shaders' actions", "\"\"\"Process all the nodes in the instance\"\"\" invalid = self.get_invalid(instance)", "from maya import cmds import pyblish.api import pype.api import pype.maya.action", "initialShadingGroup - initialParticleSE - particleCloud1 If any of those is", "- initialParticleSE - particleCloud1 If any of those is present", "[pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = 
{\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self, instance):", "= ['look'] hosts = ['maya'] label = 'Look No Default", "referenced (overriding local scene shaders). To fix this no shape", "= [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {\"lambert1\", \"initialShadingGroup\", \"initialParticleSE\", \"particleCloud1\"} def process(self,", "any of default shaders applied. \"\"\" order = pype.api.ValidateContentsOrder +", "unallowed connection to \" \"'{}': {}\".format(s, node)) invalid.add(node) return list(invalid)", "raise an error. A look is not allowed to have" ]
[ "<gh_stars>0 from flask import Flask # initialize the app app", "def iris(): from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression", "import load_iris from sklearn.linear_model import LogisticRegression X, y = load_iris(return_X_y=True)", "flask import Flask # initialize the app app = Flask(__name__)", "LogisticRegression( random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\" ).fit(X, y) return str(clf.predict(X[:2,", "random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\" ).fit(X, y) return str(clf.predict(X[:2, :]))", "function at /iris route @app.route(\"/iris\") def iris(): from sklearn.datasets import", "from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression X, y", "@app.route(\"/iris\") def iris(): from sklearn.datasets import load_iris from sklearn.linear_model import", "import LogisticRegression X, y = load_iris(return_X_y=True) clf = LogisticRegression( random_state", "sklearn.linear_model import LogisticRegression X, y = load_iris(return_X_y=True) clf = LogisticRegression(", "load_iris(return_X_y=True) clf = LogisticRegression( random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\" ).fit(X,", "sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression X, y =", "X, y = load_iris(return_X_y=True) clf = LogisticRegression( random_state = 42,", "app = Flask(__name__) # execute iris function at /iris route", "Flask # initialize the app app = Flask(__name__) # execute", "load_iris from sklearn.linear_model import LogisticRegression X, y = load_iris(return_X_y=True) clf", "/iris route @app.route(\"/iris\") def iris(): from sklearn.datasets import load_iris from", "at /iris route @app.route(\"/iris\") def iris(): from sklearn.datasets import load_iris", "route @app.route(\"/iris\") def iris(): from sklearn.datasets import load_iris from sklearn.linear_model", "iris(): from sklearn.datasets import load_iris from sklearn.linear_model 
import LogisticRegression X,", "y = load_iris(return_X_y=True) clf = LogisticRegression( random_state = 42, solver=\"lbfgs\",", "clf = LogisticRegression( random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\" ).fit(X, y)", "execute iris function at /iris route @app.route(\"/iris\") def iris(): from", "from sklearn.linear_model import LogisticRegression X, y = load_iris(return_X_y=True) clf =", "iris function at /iris route @app.route(\"/iris\") def iris(): from sklearn.datasets", "= Flask(__name__) # execute iris function at /iris route @app.route(\"/iris\")", "Flask(__name__) # execute iris function at /iris route @app.route(\"/iris\") def", "import Flask # initialize the app app = Flask(__name__) #", "from flask import Flask # initialize the app app =", "# execute iris function at /iris route @app.route(\"/iris\") def iris():", "app app = Flask(__name__) # execute iris function at /iris", "LogisticRegression X, y = load_iris(return_X_y=True) clf = LogisticRegression( random_state =", "# initialize the app app = Flask(__name__) # execute iris", "initialize the app app = Flask(__name__) # execute iris function", "= load_iris(return_X_y=True) clf = LogisticRegression( random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\"", "= LogisticRegression( random_state = 42, solver=\"lbfgs\", multi_class=\"multinomial\" ).fit(X, y) return", "the app app = Flask(__name__) # execute iris function at" ]
[ "return np.array(embeddings) def s_norm(self, test, enroll): \"\"\" Run speaker normalization", "assert 0 <= start < end, \\ f'Incorrect timing for", "# process utterances of the speakers features_dict = {} with", "normalization.') self.features_extractor = features_extractor self.embedding_extractor = embedding_extractor self.plda = plda", "in_emb_dir = None def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None,", "= int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000) speaker = line.split()[7]", "os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization pickle file `{}`.'.format(speaker)) with", "is None: self.embeddings = self.extract_embeddings() else: self.embeddings = self.load_embeddings() self.mean", "f: # append mean from speaker's embeddings speaker_embeddings = pickle.load(f)", "audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process utterances of", "__iter__(self): current = 0 while current < len(self.embeddings): yield self.embeddings[current]", "speakers. 
Args: file_name (string_types): path to input audio file speakers_dict", "(str|None): path to directory for storing embeddings min_length (int): minimal", "self.embeddings).T c = cosine_similarity(enroll, test).T scores = [] for ii", "merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as", "out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs = n_jobs", "get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings from vbdiar.utils import mkdir_p from", "(string_types): suffix of rttm files \"\"\" if audio_dir: self.audio_dir =", "file_name.split()[0] else: file_name = file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix))", "__len__(self): return len(self.embeddings) def extract_embeddings(self): \"\"\" Extract normalization embeddings using", "= features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process utterances of the speakers", "audio_dir (string_types): wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix (string_types): min_length (float):", "np.array(embeddings) def s_norm(self, test, enroll): \"\"\" Run speaker normalization (S-Norm)", "min_length, n_jobs=1): \"\"\" Args: fns: speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix:", "in fns: ret.append(process_file(file_name=fn, **kwargs)) return ret def process_file(file_name, speakers_dict, features_extractor,", "wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract embeddings for all defined", "speaker in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def", "= np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s - test_mean) /", "vbdiar.embeddings.embedding 
import extract_embeddings from vbdiar.utils import mkdir_p from vbdiar.utils.utils import", "f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)", "(string_types): path to input audio file speakers_dict (dict): dictionary containing", "[], set() with open(self.norm_list) as f: for file_name in f:", "pickled normalization embeddings from `{}`.'.format(self.in_emb_dir)) for speaker in speakers: embedding_path", "current += 1 def __getitem__(self, key): return self.embeddings[key] def __setitem__(self,", "embeddings = None in_emb_dir = None def __init__(self, norm_list, audio_dir=None,", "is defined line = line.split()[0] else: line = line.replace(os.linesep, '')", "hypothesis \"\"\" if self.plda: a = self.plda.score(test, self.embeddings).T b =", "s = c[ii][jj] test_scores.append((((s - test_mean) / test_std + (s", "= features[start:end] for speaker in features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)", "= [] for fn in fns: ret.append(process_file(file_name=fn, **kwargs)) return ret", "in fp: speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir)) for", "+= 1 def __getitem__(self, key): return self.embeddings[key] def __setitem__(self, key,", "kwargs = dargs ret = [] for fn in fns:", "ii in range(test.shape[0]): test_scores = [] for jj in range(enroll.shape[0]):", "= 0 while current < len(self.embeddings): yield self.embeddings[current] current +=", "in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long),", "= dargs ret = [] for fn in fns: ret.append(process_file(file_name=fn,", "1 def __getitem__(self, key): return self.embeddings[key] def __setitem__(self, key, value):", "fn in fns: ret.append(process_file(file_name=fn, **kwargs)) return ret def 
process_file(file_name, speakers_dict,", "from vbdiar.utils import mkdir_p from vbdiar.utils.utils import Utils logger =", "embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\" Args: fns:", "size: {features.shape[0]}, end: {end}.' if end >= features.shape[0]: end =", "for speaker in features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long =", "f: for file_name in f: if len(file_name.split()) > 1: #", "(string_types): wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix (string_types): min_length (float): Returns:", "speakers_dict[0] if self.out_emb_dir: for speaker in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir,", "suffix of rttm files \"\"\" if audio_dir: self.audio_dir = os.path.abspath(audio_dir)", "if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs =", "n_jobs if self.in_emb_dir is None: self.embeddings = self.extract_embeddings() else: self.embeddings", "\"\"\" speakers_dict, fns = {}, [] with open(self.norm_list) as f:", "line in f: if len(line.split()) > 1: # number of", "min_length=min_length) if n_jobs == 1: ret = _process_files((fns, kwargs)) else:", "np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load normalization embeddings", "embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle file found for `{}` in", "self.embeddings[key] def __setitem__(self, key, value): self.embeddings[key] = value def __len__(self):", "= get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker not in features_dict: features_dict[speaker] =", "not in features_dict: features_dict[speaker] = {} assert 0 <= start", "for file_name in f: if len(file_name.split()) > 1: # number", "= [] for jj in range(enroll.shape[0]): test_mean, test_std = 
np.mean(a.T[ii]),", "> 1: # number of speakers is defined file_name =", "np from sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments import get_frames_from_time from", "((part, kwargs) for part in Utils.partition(fns, n_jobs))) return ret def", "on cached embeddings. Args: test (np.array): test embedding enroll (np.array):", "value def __len__(self): return len(self.embeddings) def extract_embeddings(self): \"\"\" Extract normalization", "with open(self.norm_list) as f: for line in f: if len(line.split())", "start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker not in features_dict:", "features_extractor (Any): object for feature extraction embedding_extractor (Any): object for", "part in Utils.partition(fns, n_jobs))) return ret def _process_files(dargs): \"\"\" Args:", "= cosine_similarity(enroll, test).T scores = [] for ii in range(test.shape[0]):", "Tuple[np.array, np.array]: vectors for individual speakers, global mean over all", "in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\"", "\"\"\" Run speaker normalization (S-Norm) on cached embeddings. 
Args: test", "sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding import", "(string_types): in_rttm_dir (string_types): rttm_suffix (string_types): min_length (float): Returns: dict: updated", "features.shape[0]: end = features.shape[0] - 1 features_dict[speaker][(start_time, end_time)] = features[start:end]", "speaker not in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker] =", "normalization list audio_dir (string_types|None): path to audio directory in_rttm_dir (string_types|None):", "a = self.plda.score(test, self.embeddings).T b = self.plda.score(enroll, self.embeddings).T c =", "plda self.wav_suffix = wav_suffix self.rttm_suffix = rttm_suffix if in_emb_dir: self.in_emb_dir", "pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs))) return ret", "\"\"\" Extract normalization embeddings using averaging. Returns: Tuple[np.array, np.array]: vectors", "are the same merged_speakers_dict = speakers_dict[0] if self.out_emb_dir: for speaker", "\"\"\" Extract embeddings for all defined speakers. 
Args: file_name (string_types):", "Brno University of Technology FIT # Author: <NAME> <<EMAIL>> #", "embedding Returns: float: hypothesis \"\"\" if self.plda: a = self.plda.score(test,", "files for normalization.') self.features_extractor = features_extractor self.embedding_extractor = embedding_extractor self.plda", "test_scores = [] for jj in range(enroll.shape[0]): test_mean, test_std =", "self.embeddings = self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0) def __iter__(self): current", "# append mean from speaker's embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings,", "with open(self.norm_list) as f: for file_name in f: if len(file_name.split())", "vectors for individual speakers, global mean over all speakers \"\"\"", "path to input audio file speakers_dict (dict): dictionary containing all", "wav_suffix))) # process utterances of the speakers features_dict = {}", "`{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def s_norm(self, test, enroll): \"\"\" Run", "wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\" Args: fns: speakers_dict: features_extractor:", "test).T scores = [] for ii in range(test.shape[0]): test_scores =", "fns: ret.append(process_file(file_name=fn, **kwargs)) return ret def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,", "directory in_rttm_dir (string_types|None): path to directory with rttm files in_emb_dir", "multiprocessing import numpy as np from sklearn.metrics.pairwise import cosine_similarity from", "= cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll,", "= rttm_suffix if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir", "self.embeddings[key] = value def __len__(self): return len(self.embeddings) def extract_embeddings(self): \"\"\"", "in features_dict: features_dict[speaker] = {} assert 0 <= 
start <", "for ii in range(test.shape[0]): test_scores = [] for jj in", "os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker], f,", "os.path.abspath(in_rttm_dir) else: raise ValueError('It is required to have input rttm", "path to directory with rttm files in_emb_dir (str|None): path to", "embeddings min_length (int): minimal length for extracting embeddings features_extractor (Any):", "axis=0)) else: logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker,", "if end >= features.shape[0]: end = features.shape[0] - 1 features_dict[speaker][(start_time,", "in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise ValueError('It is required to", "import multiprocessing import numpy as np from sklearn.metrics.pairwise import cosine_similarity", "import os import logging import pickle import multiprocessing import numpy", "(dict): dictionary containing all embedding across speakers features_extractor (Any): embedding_extractor", "if dur > min_length: end_time = start_time + dur start,", ">= features.shape[0]: end = features.shape[0] - 1 features_dict[speaker][(start_time, end_time)] =", "features.shape[0] - 1 features_dict[speaker][(start_time, end_time)] = features[start:end] for speaker in", "of speakers is defined line = line.split()[0] else: line =", "= os.path.abspath(audio_dir) self.norm_list = norm_list if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir)", "c = cosine_similarity(enroll, test).T scores = [] for ii in", "list audio_dir (string_types|None): path to audio directory in_rttm_dir (string_types|None): path", "path to normalization list audio_dir (string_types|None): path to audio directory", "dargs: Returns: \"\"\" fns, kwargs = dargs ret = []", "logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix,", "# 
extract features from whole audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name,", "mean from speaker's embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else:", "file_name in f: if len(file_name.split()) > 1: # number of", "class Normalization(object): \"\"\" Speaker normalization S-Norm. \"\"\" embeddings = None", "features_dict: features_dict[speaker] = {} assert 0 <= start < end,", "length for extracting embeddings features_extractor (Any): object for feature extraction", "= np.mean(self.embeddings, axis=0) def __iter__(self): current = 0 while current", "= os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization pickle file `{}`.'.format(speaker))", "wav_suffix self.rttm_suffix = rttm_suffix if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if", "'') fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix,", "Returns: dict: updated dictionary with speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0]))", "line in f: start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4])", "directory with i-vectors out_emb_dir (str|None): path to directory for storing", "logger.info('Loading normalization pickle file `{}`.'.format(speaker)) with open(embedding_path, 'rb') as f:", "logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir)) for speaker in speakers:", "speakers_dict class Normalization(object): \"\"\" Speaker normalization S-Norm. 
\"\"\" embeddings =", "individual speakers, global mean over all speakers \"\"\" speakers_dict, fns", "logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract features from whole audio features", "`{}`.'.format(file_name.split()[0])) # extract features from whole audio features = features_extractor.audio2features(os.path.join(audio_dir,", "np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict class Normalization(object): \"\"\" Speaker normalization", "= logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir,", "def process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length,", "enroll (np.array): enroll embedding Returns: float: hypothesis \"\"\" if self.plda:", "int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000) speaker = line.split()[7] if", "ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))", "normalization embeddings from `{}`.'.format(self.in_emb_dir)) for speaker in speakers: embedding_path =", "enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s -", "min_length (float): Returns: dict: updated dictionary with speakers \"\"\" logger.info('Processing", "out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize", "rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs == 1: ret = _process_files((fns, kwargs))", "self.in_emb_dir)) return np.array(embeddings) def s_norm(self, test, enroll): \"\"\" Run speaker", "Reserved import os import logging import pickle import multiprocessing import", "= embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict", "norm_list 
if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise ValueError('It is", "1000) speaker = line.split()[7] if dur > min_length: end_time =", "= embedding_set.get_all_embeddings() if speaker not in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long", "= {}, [] with open(self.norm_list) as f: for line in", "mkdir_p from vbdiar.utils.utils import Utils logger = logging.getLogger(__name__) def process_files(fns,", "= None def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None,", "1: # number of speakers is defined line = line.split()[0]", "speakers: embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization pickle", "rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns) # all are", "across speakers features_extractor (Any): embedding_extractor (Any): audio_dir (string_types): wav_suffix (string_types):", "normalization S-Norm. \"\"\" embeddings = None in_emb_dir = None def", "n_jobs))) return ret def _process_files(dargs): \"\"\" Args: dargs: Returns: \"\"\"", "= min_length self.n_jobs = n_jobs if self.in_emb_dir is None: self.embeddings", "== 1: ret = _process_files((fns, kwargs)) else: pool = multiprocessing.Pool(n_jobs)", "over all speakers \"\"\" speakers_dict, fns = {}, [] with", "= dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if", "Extract embeddings for all defined speakers. 
Args: file_name (string_types): path", "number of speakers is defined file_name = file_name.split()[0] else: file_name", "> min_length: end_time = start_time + dur start, end =", "int(float(line.split()[4]) * 1000) speaker = line.split()[7] if dur > min_length:", "for line in f: start_time, dur = int(float(line.split()[3]) * 1000),", "kwargs) for part in Utils.partition(fns, n_jobs))) return ret def _process_files(dargs):", "embeddings per speaker \"\"\" embeddings, speakers = [], set() with", "end: {end}.' if end >= features.shape[0]: end = features.shape[0] -", "pickle import multiprocessing import numpy as np from sklearn.metrics.pairwise import", "(Any): object for extracting embedding plda (PLDA|None): plda model object", "features_extractor self.embedding_extractor = embedding_extractor self.plda = plda self.wav_suffix = wav_suffix", "line.split()[0] else: line = line.replace(os.linesep, '') fns.append(line) speakers_dict = process_files(fns,", "= file_name.split()[0] else: file_name = file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name),", "embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict class", "in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):", "test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) /", "embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if speaker not in speakers_dict.keys(): speakers_dict[speaker]", "if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir)", "os import logging import pickle import multiprocessing import numpy as", "self.embeddings = self.extract_embeddings() else: self.embeddings = self.load_embeddings() self.mean = np.mean(self.embeddings,", "= 
multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part, kwargs) for part in", "merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\"", "a = cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T c =", "f: start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)", "in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self):", "as np from sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments import get_frames_from_time", "in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length", "_process_files((fns, kwargs)) else: pool = multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part,", "cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll, test).T", "features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs ==", "embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if speaker not", "= embedding_extractor self.plda = plda self.wav_suffix = wav_suffix self.rttm_suffix =", "\"\"\" fns, kwargs = dargs ret = [] for fn", "end >= features.shape[0]: end = features.shape[0] - 1 features_dict[speaker][(start_time, end_time)]", "\"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, 
rttm_suffix=rttm_suffix,", "\"\"\" embeddings, speakers = [], set() with open(self.norm_list) as f:", "numpy as np from sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments import", "to input audio file speakers_dict (dict): dictionary containing all embedding", "def _process_files(dargs): \"\"\" Args: dargs: Returns: \"\"\" fns, kwargs =", "Args: dargs: Returns: \"\"\" fns, kwargs = dargs ret =", "rttm_suffix if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir =", "= np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict class Normalization(object): \"\"\" Speaker", "<= start < end, \\ f'Incorrect timing for extracting features,", "def extract_embeddings(self): \"\"\" Extract normalization embeddings using averaging. Returns: Tuple[np.array,", "import logging import pickle import multiprocessing import numpy as np", "features from whole audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) #", "rttm files \"\"\" if audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list =", "for individual speakers, global mean over all speakers \"\"\" speakers_dict,", "return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load normalization embeddings from pickle", "= None in_emb_dir = None def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None,", "features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process utterances of the", "wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize normalization object. Args: norm_list (string_types):", "{} assert 0 <= start < end, \\ f'Incorrect timing", "Initialize normalization object. 
Args: norm_list (string_types): path to normalization list", "if len(file_name.split()) > 1: # number of speakers is defined", "enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s - test_mean)", "test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])", "Speaker normalization S-Norm. \"\"\" embeddings = None in_emb_dir = None", "embedding plda (PLDA|None): plda model object wav_suffix (string_types): suffix of", "\\ f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]},", "np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s - test_mean) / test_std +", "Args: test (np.array): test embedding enroll (np.array): enroll embedding Returns:", "in f: if len(file_name.split()) > 1: # number of speakers", "'rb') as f: # append mean from speaker's embeddings speaker_embeddings", "as fp: for line in fp: speakers.add(line.split()[7]) logger.info('Loading pickled normalization", "path to directory with i-vectors out_emb_dir (str|None): path to directory", "features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract embeddings", "(S-Norm) on cached embeddings. 
Args: test (np.array): test embedding enroll", "audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns: \"\"\" kwargs =", "all are the same merged_speakers_dict = speakers_dict[0] if self.out_emb_dir: for", "self.norm_list = norm_list if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise", "speakers_dict, fns = {}, [] with open(self.norm_list) as f: for", "fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,", "\"\"\" embeddings = None in_emb_dir = None def __init__(self, norm_list,", "= os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker],", "(string_types): min_length (float): Returns: dict: updated dictionary with speakers \"\"\"", "with open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker", "in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs == 1: ret = _process_files((fns,", "len(line.split()) > 1: # number of speakers is defined line", "containing all embedding across speakers features_extractor (Any): embedding_extractor (Any): audio_dir", "[] for fn in fns: ret.append(process_file(file_name=fn, **kwargs)) return ret def", "vbdiar.utils.utils import Utils logger = logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor,", "np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load normalization embeddings from pickle files.", "(Any): embedding_extractor (Any): audio_dir (string_types): wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix", "fp: speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings from 
`{}`.'.format(self.in_emb_dir)) for speaker", "jj in range(enroll.shape[0]): test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std", "import pickle import multiprocessing import numpy as np from sklearn.metrics.pairwise", "test (np.array): test embedding enroll (np.array): enroll embedding Returns: float:", "embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) ==", "All Rights Reserved import os import logging import pickle import", "= [] for ii in range(test.shape[0]): test_scores = [] for", "return ret def _process_files(dargs): \"\"\" Args: dargs: Returns: \"\"\" fns,", "norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav',", "else: self.embeddings = self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0) def __iter__(self):", "from vbdiar.embeddings.embedding import extract_embeddings from vbdiar.utils import mkdir_p from vbdiar.utils.utils", "extract_embeddings(self): \"\"\" Extract normalization embeddings using averaging. 
Returns: Tuple[np.array, np.array]:", "f: for line in f: if len(line.split()) > 1: #", "embeddings, speakers = [], set() with open(self.norm_list) as f: for", "in range(test.shape[0]): test_scores = [] for jj in range(enroll.shape[0]): test_mean,", "self.mean = np.mean(self.embeddings, axis=0) def __iter__(self): current = 0 while", "per speaker \"\"\" embeddings, speakers = [], set() with open(self.norm_list)", "get_frames_from_time(int(end_time)) if speaker not in features_dict: features_dict[speaker] = {} assert", "Copyright (C) 2018 Brno University of Technology FIT # Author:", "defined line = line.split()[0] else: line = line.replace(os.linesep, '') fns.append(line)", "file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def", "speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns:", "rttm_suffix (string_types): suffix of rttm files \"\"\" if audio_dir: self.audio_dir", "def __iter__(self): current = 0 while current < len(self.embeddings): yield", "f: if len(file_name.split()) > 1: # number of speakers is", "start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000) speaker", "embedding_extractor self.plda = plda self.wav_suffix = wav_suffix self.rttm_suffix = rttm_suffix", "'') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp: for line in", "logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir)) return", "as f: for file_name in f: if len(file_name.split()) > 1:", "pickle files. 
Returns: np.array: embeddings per speaker \"\"\" embeddings, speakers", "rttm files in_emb_dir (str|None): path to directory with i-vectors out_emb_dir", "test_mean) / test_std + (s - enroll_mean) / enroll_std) /", "= self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll, test).T else: a =", "number of speakers is defined line = line.split()[0] else: line", "(Any): audio_dir (string_types): wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix (string_types): min_length", "of rttm files \"\"\" if audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list", "open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp: for line in fp: speakers.add(line.split()[7])", "[] for jj in range(enroll.shape[0]): test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])", "Returns: float: hypothesis \"\"\" if self.plda: a = self.plda.score(test, self.embeddings).T", "scores = [] for ii in range(test.shape[0]): test_scores = []", "speaker in features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings()", "using averaging. Returns: Tuple[np.array, np.array]: vectors for individual speakers, global", "for speaker in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values()))", "(str|None): path to directory with i-vectors out_emb_dir (str|None): path to", "Extract normalization embeddings using averaging. 
Returns: Tuple[np.array, np.array]: vectors for", "len(speakers_dict) == len(fns) # all are the same merged_speakers_dict =", "- test_mean) / test_std + (s - enroll_mean) / enroll_std)", "import mkdir_p from vbdiar.utils.utils import Utils logger = logging.getLogger(__name__) def", "path to directory for storing embeddings min_length (int): minimal length", "else: pool = multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part, kwargs) for", "= [], set() with open(self.norm_list) as f: for file_name in", "extracting embedding plda (PLDA|None): plda model object wav_suffix (string_types): suffix", "pool = multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part, kwargs) for part", "{} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for line in f:", "audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list = norm_list if in_rttm_dir: self.in_rttm_dir", "float: hypothesis \"\"\" if self.plda: a = self.plda.score(test, self.embeddings).T b", "all defined speakers. 
Args: file_name (string_types): path to input audio", "Utils.partition(fns, n_jobs))) return ret def _process_files(dargs): \"\"\" Args: dargs: Returns:", "if audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list = norm_list if in_rttm_dir:", "= os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs = n_jobs if self.in_emb_dir", "embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle file", "extracting embeddings features_extractor (Any): object for feature extraction embedding_extractor (Any):", "ret = [] for fn in fns: ret.append(process_file(file_name=fn, **kwargs)) return", "audio directory in_rttm_dir (string_types|None): path to directory with rttm files", "features_dict[speaker][(start_time, end_time)] = features[start:end] for speaker in features_dict: embedding_set =", "speakers = [], set() with open(self.norm_list) as f: for file_name", "embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization pickle file", "as f: for line in f: start_time, dur = int(float(line.split()[3])", "+ dur start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker not", "files \"\"\" if audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list = norm_list", "\"\"\" Args: dargs: Returns: \"\"\" fns, kwargs = dargs ret", "speakers is defined line = line.split()[0] else: line = line.replace(os.linesep,", "= features_extractor self.embedding_extractor = embedding_extractor self.plda = plda self.wav_suffix =", "for `{}` in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def s_norm(self, test,", "len(fns) # all are the same merged_speakers_dict = speakers_dict[0] if", "f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end:", "features_dict[speaker] = {} assert 0 <= start < end, \\", 
"audio_dir (string_types|None): path to audio directory in_rttm_dir (string_types|None): path to", "to have input rttm files for normalization.') self.features_extractor = features_extractor", "def __setitem__(self, key, value): self.embeddings[key] = value def __len__(self): return", "self.plda.score(test, self.embeddings).T b = self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll, test).T", "{}, [] with open(self.norm_list) as f: for line in f:", "directory with rttm files in_emb_dir (str|None): path to directory with", "# number of speakers is defined line = line.split()[0] else:", "else: file_name = file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as", "open(self.norm_list) as f: for file_name in f: if len(file_name.split()) >", "open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker in", "rttm_suffix, min_length): \"\"\" Extract embeddings for all defined speakers. Args:", "speakers features_dict = {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for", "= start_time + dur start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if", "cosine_similarity(enroll, test).T scores = [] for ii in range(test.shape[0]): test_scores", "embeddings for all defined speakers. Args: file_name (string_types): path to", "\"\"\" Speaker normalization S-Norm. 
\"\"\" embeddings = None in_emb_dir =", "for jj in range(enroll.shape[0]): test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean,", "ret def process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix,", "# # Copyright (C) 2018 Brno University of Technology FIT", "file_name = file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:", "b = cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll, test).T scores =", "line.split()[7] if dur > min_length: end_time = start_time + dur", "(Any): object for feature extraction embedding_extractor (Any): object for extracting", "multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns,", "min_length (int): minimal length for extracting embeddings features_extractor (Any): object", "averaging. Returns: Tuple[np.array, np.array]: vectors for individual speakers, global mean", "2018 Brno University of Technology FIT # Author: <NAME> <<EMAIL>>", "min_length: end_time = start_time + dur start, end = get_frames_from_time(int(start_time)),", "if os.path.isfile(embedding_path): logger.info('Loading normalization pickle file `{}`.'.format(speaker)) with open(embedding_path, 'rb')", "speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict class Normalization(object): \"\"\"", "in_rttm_dir (string_types): rttm_suffix (string_types): min_length (float): Returns: dict: updated dictionary", "(float): Returns: dict: updated dictionary with speakers \"\"\" logger.info('Processing file", "FIT # Author: <NAME> <<EMAIL>> # All Rights Reserved import", "kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, 
min_length=min_length)", "speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)", "embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize normalization object. Args:", "defined speakers. Args: file_name (string_types): path to input audio file", "in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb')", "pickle file `{}`.'.format(speaker)) with open(embedding_path, 'rb') as f: # append", "= np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s =", "Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir,", "0 while current < len(self.embeddings): yield self.embeddings[current] current += 1", "Returns: np.array: embeddings per speaker \"\"\" embeddings, speakers = [],", "speakers is defined file_name = file_name.split()[0] else: file_name = file_name.replace(os.linesep,", "to audio directory in_rttm_dir (string_types|None): path to directory with rttm", "dur start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker not in", "min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize normalization", "- 1 features_dict[speaker][(start_time, end_time)] = features[start:end] for speaker in features_dict:", "/ test_std + (s - enroll_mean) / enroll_std) / 2))", "audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs == 1: ret", "for part in Utils.partition(fns, n_jobs))) return ret def _process_files(dargs): \"\"\"", "all speakers \"\"\" 
speakers_dict, fns = {}, [] with open(self.norm_list)", "in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract embeddings for all defined speakers.", "b = self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll, test).T else: a", "Args: file_name (string_types): path to input audio file speakers_dict (dict):", "(string_types): suffix of wav files rttm_suffix (string_types): suffix of rttm", "wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns) #", "suffix of wav files rttm_suffix (string_types): suffix of rttm files", "import Utils logger = logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor, embedding_extractor,", "(string_types|None): path to directory with rttm files in_emb_dir (str|None): path", "mean over all speakers \"\"\" speakers_dict, fns = {}, []", "speakers_dict (dict): dictionary containing all embedding across speakers features_extractor (Any):", "speaker normalization (S-Norm) on cached embeddings. Args: test (np.array): test", "logger = logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix,", "cached embeddings. 
Args: test (np.array): test embedding enroll (np.array): enroll", "utf-8 -*- # # Copyright (C) 2018 Brno University of", "1: ret = _process_files((fns, kwargs)) else: pool = multiprocessing.Pool(n_jobs) ret", "Returns: Tuple[np.array, np.array]: vectors for individual speakers, global mean over", "input rttm files for normalization.') self.features_extractor = features_extractor self.embedding_extractor =", "global mean over all speakers \"\"\" speakers_dict, fns = {},", "n_jobs == 1: ret = _process_files((fns, kwargs)) else: pool =", "= speakers_dict[0] if self.out_emb_dir: for speaker in merged_speakers_dict: out_path =", "(np.array): enroll embedding Returns: float: hypothesis \"\"\" if self.plda: a", "timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'", "required to have input rttm files for normalization.') self.features_extractor =", "audio file speakers_dict (dict): dictionary containing all embedding across speakers", "cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll, test).T scores = [] for", "of speakers is defined file_name = file_name.split()[0] else: file_name =", "len(file_name.split()) > 1: # number of speakers is defined file_name", "self.n_jobs = n_jobs if self.in_emb_dir is None: self.embeddings = self.extract_embeddings()", "\"\"\" if audio_dir: self.audio_dir = os.path.abspath(audio_dir) self.norm_list = norm_list if", "speaker not in features_dict: features_dict[speaker] = {} assert 0 <=", "if n_jobs == 1: ret = _process_files((fns, kwargs)) else: pool", "path to audio directory in_rttm_dir (string_types|None): path to directory with", "rttm files for normalization.') self.features_extractor = features_extractor self.embedding_extractor = embedding_extractor", "in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def s_norm(self, test, enroll): \"\"\"", "self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0) def __iter__(self): current = 0", "python # 
-*- coding: utf-8 -*- # # Copyright (C)", "ret def _process_files(dargs): \"\"\" Args: dargs: Returns: \"\"\" fns, kwargs", "if self.in_emb_dir is None: self.embeddings = self.extract_embeddings() else: self.embeddings =", "extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if speaker not in speakers_dict.keys():", "from vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings from vbdiar.utils", "start: {start}, size: {features.shape[0]}, end: {end}.' if end >= features.shape[0]:", "pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker],", "speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract features from whole", "= {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for line in", "extracting features, start: {start}, size: {features.shape[0]}, end: {end}.' if end", "\"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract features from whole audio", "features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns: \"\"\"", "features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize normalization object.", "for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.' 
if", "object for feature extraction embedding_extractor (Any): object for extracting embedding", "key, value): self.embeddings[key] = value def __len__(self): return len(self.embeddings) def", "dictionary containing all embedding across speakers features_extractor (Any): embedding_extractor (Any):", "end, \\ f'Incorrect timing for extracting features, start: {start}, size:", "return self.embeddings[key] def __setitem__(self, key, value): self.embeddings[key] = value def", "else: line = line.replace(os.linesep, '') fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict,", "(int): minimal length for extracting embeddings features_extractor (Any): object for", "<<EMAIL>> # All Rights Reserved import os import logging import", "speakers features_extractor (Any): embedding_extractor (Any): audio_dir (string_types): wav_suffix (string_types): in_rttm_dir", "self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise ValueError('It is required to have", "if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise ValueError('It is required", "def __getitem__(self, key): return self.embeddings[key] def __setitem__(self, key, value): self.embeddings[key]", "np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s - test_mean) / test_std", "embeddings_long = embedding_set.get_all_embeddings() if speaker not in speakers_dict.keys(): speakers_dict[speaker] =", "for extracting embeddings features_extractor (Any): object for feature extraction embedding_extractor", "embedding_extractor (Any): object for extracting embedding plda (PLDA|None): plda model", "normalization embeddings from pickle files. 
Returns: np.array: embeddings per speaker", "normalization pickle file `{}`.'.format(speaker)) with open(embedding_path, 'rb') as f: #", "dur > min_length: end_time = start_time + dur start, end", "`{}` in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def s_norm(self, test, enroll):", "features_extractor (Any): embedding_extractor (Any): audio_dir (string_types): wav_suffix (string_types): in_rttm_dir (string_types):", "fns, kwargs = dargs ret = [] for fn in", "of the speakers features_dict = {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as", "== len(fns) # all are the same merged_speakers_dict = speakers_dict[0]", "normalization embeddings using averaging. Returns: Tuple[np.array, np.array]: vectors for individual", "os.path.abspath(audio_dir) self.norm_list = norm_list if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else:", "file speakers_dict (dict): dictionary containing all embedding across speakers features_extractor", "'wb') as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict:", "f: for line in f: start_time, dur = int(float(line.split()[3]) *", "speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return", "speakers \"\"\" speakers_dict, fns = {}, [] with open(self.norm_list) as", "self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll, test).T else: a = cosine_similarity(test,", "rttm_suffix (string_types): min_length (float): Returns: dict: updated dictionary with speakers", "f: if len(line.split()) > 1: # number of speakers is", "current = 0 while current < len(self.embeddings): yield self.embeddings[current] current", "file `{}`.'.format(file_name.split()[0])) # extract features from whole audio features =", "in range(enroll.shape[0]): test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std =", "the speakers 
features_dict = {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:", "merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load", "__setitem__(self, key, value): self.embeddings[key] = value def __len__(self): return len(self.embeddings)", "else: logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))", "= norm_list if in_rttm_dir: self.in_rttm_dir = os.path.abspath(in_rttm_dir) else: raise ValueError('It", "S-Norm. \"\"\" embeddings = None in_emb_dir = None def __init__(self,", "-*- # # Copyright (C) 2018 Brno University of Technology", "{end}.' if end >= features.shape[0]: end = features.shape[0] - 1", "norm_list (string_types): path to normalization list audio_dir (string_types|None): path to", "with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for line in f: start_time,", "minimal length for extracting embeddings features_extractor (Any): object for feature", "np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj]", "return ret def process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir,", "feature extraction embedding_extractor (Any): object for extracting embedding plda (PLDA|None):", "mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for", "= features.shape[0] - 1 features_dict[speaker][(start_time, end_time)] = features[start:end] for speaker", "not in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker],", "(string_types): rttm_suffix (string_types): min_length (float): Returns: dict: updated dictionary with", "key): return self.embeddings[key] def __setitem__(self, key, value): 
self.embeddings[key] = value", "features, start: {start}, size: {features.shape[0]}, end: {end}.' if end >=", "\"\"\" Initialize normalization object. Args: norm_list (string_types): path to normalization", "file_name = file_name.split()[0] else: file_name = file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir,", "vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings from vbdiar.utils import", "np.mean(self.embeddings, axis=0) def __iter__(self): current = 0 while current <", "= file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp: for", "features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process utterances of the speakers features_dict", "self.plda.score(enroll, test).T else: a = cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll,", "n_jobs: Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,", "the same merged_speakers_dict = speakers_dict[0] if self.out_emb_dir: for speaker in", "logging import pickle import multiprocessing import numpy as np from", "Load normalization embeddings from pickle files. Returns: np.array: embeddings per", "extract features from whole audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))", "from pickle files. 
Returns: np.array: embeddings per speaker \"\"\" embeddings,", "<NAME> <<EMAIL>> # All Rights Reserved import os import logging", "= plda self.wav_suffix = wav_suffix self.rttm_suffix = rttm_suffix if in_emb_dir:", "f, pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)", "speakers, global mean over all speakers \"\"\" speakers_dict, fns =", "University of Technology FIT # Author: <NAME> <<EMAIL>> # All", "features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if speaker", "import get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings from vbdiar.utils import mkdir_p", "self.wav_suffix = wav_suffix self.rttm_suffix = rttm_suffix if in_emb_dir: self.in_emb_dir =", "ret.append(process_file(file_name=fn, **kwargs)) return ret def process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir,", "np.array: embeddings per speaker \"\"\" embeddings, speakers = [], set()", "# -*- coding: utf-8 -*- # # Copyright (C) 2018", "\"\"\" Load normalization embeddings from pickle files. 
Returns: np.array: embeddings", "file_name), self.rttm_suffix)) as fp: for line in fp: speakers.add(line.split()[7]) logger.info('Loading", "for normalization.') self.features_extractor = features_extractor self.embedding_extractor = embedding_extractor self.plda =", "def __len__(self): return len(self.embeddings) def extract_embeddings(self): \"\"\" Extract normalization embeddings", "ValueError('It is required to have input rttm files for normalization.')", "embedding_extractor (Any): audio_dir (string_types): wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix (string_types):", "audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\" Args: fns: speakers_dict:", "import extract_embeddings from vbdiar.utils import mkdir_p from vbdiar.utils.utils import Utils", "in Utils.partition(fns, n_jobs))) return ret def _process_files(dargs): \"\"\" Args: dargs:", "pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict: merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0) return", "coding: utf-8 -*- # # Copyright (C) 2018 Brno University", "is required to have input rttm files for normalization.') self.features_extractor", "for line in f: if len(line.split()) > 1: # number", "= c[ii][jj] test_scores.append((((s - test_mean) / test_std + (s -", "= self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0) def __iter__(self): current =", "line.replace(os.linesep, '') fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,", "object. 
Args: norm_list (string_types): path to normalization list audio_dir (string_types|None):", "else: speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0) return speakers_dict class Normalization(object):", "input audio file speakers_dict (dict): dictionary containing all embedding across", "value): self.embeddings[key] = value def __len__(self): return len(self.embeddings) def extract_embeddings(self):", "np.array]: vectors for individual speakers, global mean over all speakers", "fns = {}, [] with open(self.norm_list) as f: for line", "= np.mean(merged_speakers_dict[speaker], axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load normalization", "self.rttm_suffix)) as fp: for line in fp: speakers.add(line.split()[7]) logger.info('Loading pickled", "(C) 2018 Brno University of Technology FIT # Author: <NAME>", "dargs ret = [] for fn in fns: ret.append(process_file(file_name=fn, **kwargs))", "yield self.embeddings[current] current += 1 def __getitem__(self, key): return self.embeddings[key]", "embedding across speakers features_extractor (Any): embedding_extractor (Any): audio_dir (string_types): wav_suffix", "from whole audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process", "to normalization list audio_dir (string_types|None): path to audio directory in_rttm_dir", "enroll embedding Returns: float: hypothesis \"\"\" if self.plda: a =", "(np.array): test embedding enroll (np.array): enroll embedding Returns: float: hypothesis", "embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract embeddings for", "# all are the same merged_speakers_dict = speakers_dict[0] if self.out_emb_dir:", "os.path.isfile(embedding_path): logger.info('Loading normalization pickle file `{}`.'.format(speaker)) with open(embedding_path, 'rb') as", "= self.plda.score(test, self.embeddings).T b = 
self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll,", "to directory for storing embeddings min_length (int): minimal length for", "speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\"", "object for extracting embedding plda (PLDA|None): plda model object wav_suffix", "file_name.replace(os.linesep, '') with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp: for line", "range(enroll.shape[0]): test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]),", "test_std = np.mean(a.T[ii]), np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s", "pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings)", "'{}{}'.format(file_name, wav_suffix))) # process utterances of the speakers features_dict =", "= wav_suffix self.rttm_suffix = rttm_suffix if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir)", "embeddings from `{}`.'.format(self.in_emb_dir)) for speaker in speakers: embedding_path = os.path.join(self.in_emb_dir,", "speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir)) for speaker in", "speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle file found", "as f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict: merged_speakers_dict[speaker]", "with speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract features from", "of wav files rttm_suffix (string_types): suffix of rttm files \"\"\"", "in f: if len(line.split()) > 1: # number of speakers", "1: # number of speakers is defined file_name = file_name.split()[0]", "to directory with i-vectors out_emb_dir (str|None): path to directory for", 
"(string_types|None): path to audio directory in_rttm_dir (string_types|None): path to directory", "set() with open(self.norm_list) as f: for file_name in f: if", "embedding enroll (np.array): enroll embedding Returns: float: hypothesis \"\"\" if", "features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\" Args:", "(PLDA|None): plda model object wav_suffix (string_types): suffix of wav files", "in_rttm_dir (string_types|None): path to directory with rttm files in_emb_dir (str|None):", "to directory with rttm files in_emb_dir (str|None): path to directory", "min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns) # all are the", "of Technology FIT # Author: <NAME> <<EMAIL>> # All Rights", "self.rttm_suffix = rttm_suffix if in_emb_dir: self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir:", "for line in fp: speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings from", "extraction embedding_extractor (Any): object for extracting embedding plda (PLDA|None): plda", "while current < len(self.embeddings): yield self.embeddings[current] current += 1 def", "\"\"\" if self.plda: a = self.plda.score(test, self.embeddings).T b = self.plda.score(enroll,", "start < end, \\ f'Incorrect timing for extracting features, start:", "# Copyright (C) 2018 Brno University of Technology FIT #", "embeddings features_extractor (Any): object for feature extraction embedding_extractor (Any): object", "fns: speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs:", "files rttm_suffix (string_types): suffix of rttm files \"\"\" if audio_dir:", "[] with open(self.norm_list) as f: for line in f: if", "'{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization pickle file `{}`.'.format(speaker)) with open(embedding_path,", "-*- coding: utf-8 -*- # # Copyright (C) 2018 
Brno", "= self.plda.score(enroll, test).T else: a = cosine_similarity(test, self.embeddings).T b =", "0 <= start < end, \\ f'Incorrect timing for extracting", "= line.replace(os.linesep, '') fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor,", "min_length self.n_jobs = n_jobs if self.in_emb_dir is None: self.embeddings =", "c = self.plda.score(enroll, test).T else: a = cosine_similarity(test, self.embeddings).T b", "append mean from speaker's embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0))", "wav_suffix (string_types): suffix of wav files rttm_suffix (string_types): suffix of", "test_std + (s - enroll_mean) / enroll_std) / 2)) scores.append(test_scores)", "__init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None,", "wav files rttm_suffix (string_types): suffix of rttm files \"\"\" if", "= n_jobs if self.in_emb_dir is None: self.embeddings = self.extract_embeddings() else:", "from sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding", "from speaker's embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No", "__getitem__(self, key): return self.embeddings[key] def __setitem__(self, key, value): self.embeddings[key] =", "= self.extract_embeddings() else: self.embeddings = self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0)", "is defined file_name = file_name.split()[0] else: file_name = file_name.replace(os.linesep, '')", "for speaker in speakers: embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path):", "speaker in speakers: embedding_path = 
os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading", "= cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll, test).T scores = []", "else: a = cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T c", "in_emb_dir (str|None): path to directory with i-vectors out_emb_dir (str|None): path", "open(embedding_path, 'rb') as f: # append mean from speaker's embeddings", "plda (PLDA|None): plda model object wav_suffix (string_types): suffix of wav", "speaker in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path,", "wav_suffix (string_types): in_rttm_dir (string_types): rttm_suffix (string_types): min_length (float): Returns: dict:", "speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract", "for fn in fns: ret.append(process_file(file_name=fn, **kwargs)) return ret def process_file(file_name,", "self.min_length = min_length self.n_jobs = n_jobs if self.in_emb_dir is None:", "open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for line in f: start_time, dur", "os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs", "found for `{}` in `{}`.'.format(speaker, self.in_emb_dir)) return np.array(embeddings) def s_norm(self,", "embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs == 1:", "self.plda: a = self.plda.score(test, self.embeddings).T b = self.plda.score(enroll, self.embeddings).T c", "file `{}`.'.format(speaker)) with open(embedding_path, 'rb') as f: # append mean", "1000), int(float(line.split()[4]) * 1000) speaker = line.split()[7] if dur >", "fp: for line in fp: 
speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings", "`{}`.'.format(speaker)) with open(embedding_path, 'rb') as f: # append mean from", "rttm_suffix='.rttm', n_jobs=1): \"\"\" Initialize normalization object. Args: norm_list (string_types): path", "Args: norm_list (string_types): path to normalization list audio_dir (string_types|None): path", "dict: updated dictionary with speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) #", "ret = _process_files((fns, kwargs)) else: pool = multiprocessing.Pool(n_jobs) ret =", "self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs = n_jobs if", "self.plda = plda self.wav_suffix = wav_suffix self.rttm_suffix = rttm_suffix if", "embeddings from pickle files. Returns: np.array: embeddings per speaker \"\"\"", "model object wav_suffix (string_types): suffix of wav files rttm_suffix (string_types):", "in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,", "def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,", "test embedding enroll (np.array): enroll embedding Returns: float: hypothesis \"\"\"", "process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\"", "speaker = line.split()[7] if dur > min_length: end_time = start_time", "+ (s - enroll_mean) / enroll_std) / 2)) scores.append(test_scores) return", "from vbdiar.utils.utils import Utils logger = logging.getLogger(__name__) def process_files(fns, speakers_dict,", "start_time + dur start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker", "n_jobs=1): \"\"\" Initialize normalization object. 
Args: norm_list (string_types): path to", "enroll): \"\"\" Run speaker normalization (S-Norm) on cached embeddings. Args:", "None in_emb_dir = None def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,", "merged_speakers_dict = speakers_dict[0] if self.out_emb_dir: for speaker in merged_speakers_dict: out_path", "= pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs))) return", "for storing embeddings min_length (int): minimal length for extracting embeddings", "whole audio features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix))) # process utterances", "for extracting embedding plda (PLDA|None): plda model object wav_suffix (string_types):", "Normalization(object): \"\"\" Speaker normalization S-Norm. \"\"\" embeddings = None in_emb_dir", "from `{}`.'.format(self.in_emb_dir)) for speaker in speakers: embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))", "if len(line.split()) > 1: # number of speakers is defined", "normalization object. 
Args: norm_list (string_types): path to normalization list audio_dir", "**kwargs)) return ret def process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix,", "line in fp: speakers.add(line.split()[7]) logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))", "(string_types): path to normalization list audio_dir (string_types|None): path to audio", "same merged_speakers_dict = speakers_dict[0] if self.out_emb_dir: for speaker in merged_speakers_dict:", "process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):", "vbdiar.utils import mkdir_p from vbdiar.utils.utils import Utils logger = logging.getLogger(__name__)", "features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict)", "axis=0) return speakers_dict class Normalization(object): \"\"\" Speaker normalization S-Norm. \"\"\"", "n_jobs=1): \"\"\" Args: fns: speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir:", "[] for ii in range(test.shape[0]): test_scores = [] for jj", "dictionary with speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract features", "min_length): \"\"\" Extract embeddings for all defined speakers. 
Args: file_name", "defined file_name = file_name.split()[0] else: file_name = file_name.replace(os.linesep, '') with", "< len(self.embeddings): yield self.embeddings[current] current += 1 def __getitem__(self, key):", "end = features.shape[0] - 1 features_dict[speaker][(start_time, end_time)] = features[start:end] for", "all embedding across speakers features_extractor (Any): embedding_extractor (Any): audio_dir (string_types):", "= value def __len__(self): return len(self.embeddings) def extract_embeddings(self): \"\"\" Extract", "# All Rights Reserved import os import logging import pickle", "line = line.replace(os.linesep, '') fns.append(line) speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,", "assert len(speakers_dict) == len(fns) # all are the same merged_speakers_dict", "Run speaker normalization (S-Norm) on cached embeddings. Args: test (np.array):", "audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns)", "kwargs)) else: pool = multiprocessing.Pool(n_jobs) ret = pool.map(_process_files, ((part, kwargs)", "self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T c = cosine_similarity(enroll, test).T scores", "len(self.embeddings): yield self.embeddings[current] current += 1 def __getitem__(self, key): return", "with rttm files in_emb_dir (str|None): path to directory with i-vectors", "self.embeddings).T c = self.plda.score(enroll, test).T else: a = cosine_similarity(test, self.embeddings).T", "test).T else: a = cosine_similarity(test, self.embeddings).T b = cosine_similarity(enroll, self.embeddings).T", "if speaker not in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else: speakers_dict[speaker]", "if speaker not in features_dict: features_dict[speaker] = {} assert 0", "= _process_files((fns, kwargs)) else: pool = 
multiprocessing.Pool(n_jobs) ret = pool.map(_process_files,", "return speakers_dict class Normalization(object): \"\"\" Speaker normalization S-Norm. \"\"\" embeddings", "audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length): \"\"\" Extract embeddings for all", "file_name (string_types): path to input audio file speakers_dict (dict): dictionary", "else: raise ValueError('It is required to have input rttm files", "_process_files(dargs): \"\"\" Args: dargs: Returns: \"\"\" fns, kwargs = dargs", "process utterances of the speakers features_dict = {} with open(f'{os.path.join(in_rttm_dir,", "plda model object wav_suffix (string_types): suffix of wav files rttm_suffix", "= os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length = min_length", "pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle file found for `{}`", "self.embeddings[current] current += 1 def __getitem__(self, key): return self.embeddings[key] def", "dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000) speaker =", "< end, \\ f'Incorrect timing for extracting features, start: {start},", "{features.shape[0]}, end: {end}.' 
if end >= features.shape[0]: end = features.shape[0]", "self.audio_dir = os.path.abspath(audio_dir) self.norm_list = norm_list if in_rttm_dir: self.in_rttm_dir =", "self.extract_embeddings() else: self.embeddings = self.load_embeddings() self.mean = np.mean(self.embeddings, axis=0) def", "n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns) # all are the same", "embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns: \"\"\" kwargs", "= {} assert 0 <= start < end, \\ f'Incorrect", "#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright", "process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)", "axis=0) return np.array(list(merged_speakers_dict.values())) def load_embeddings(self): \"\"\" Load normalization embeddings from", "= os.path.abspath(in_rttm_dir) else: raise ValueError('It is required to have input", "utterances of the speakers features_dict = {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}')", "> 1: # number of speakers is defined line =", "rttm_suffix: min_length: n_jobs: Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor,", "= line.split()[7] if dur > min_length: end_time = start_time +", "end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time)) if speaker not in features_dict: features_dict[speaker]", "self.in_emb_dir = os.path.abspath(in_emb_dir) if out_emb_dir: self.out_emb_dir = os.path.abspath(out_emb_dir) self.min_length =", "def process_file(file_name, speakers_dict, features_extractor, embedding_extractor, audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):", "plda=None, wav_suffix='.wav', rttm_suffix='.rttm', 
n_jobs=1): \"\"\" Initialize normalization object. Args: norm_list", "for feature extraction embedding_extractor (Any): object for extracting embedding plda", "Utils logger = logging.getLogger(__name__) def process_files(fns, speakers_dict, features_extractor, embedding_extractor, audio_dir,", "* 1000) speaker = line.split()[7] if dur > min_length: end_time", "directory for storing embeddings min_length (int): minimal length for extracting", "if self.out_emb_dir: for speaker in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')", "in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert len(speakers_dict) == len(fns) # all", "def load_embeddings(self): \"\"\" Load normalization embeddings from pickle files. Returns:", "current < len(self.embeddings): yield self.embeddings[current] current += 1 def __getitem__(self,", "\"\"\" Args: fns: speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix:", "in f: start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) *", "files. 
Returns: np.array: embeddings per speaker \"\"\" embeddings, speakers =", "speaker \"\"\" embeddings, speakers = [], set() with open(self.norm_list) as", "end_time = start_time + dur start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))", "s_norm(self, test, enroll): \"\"\" Run speaker normalization (S-Norm) on cached", "self.features_extractor = features_extractor self.embedding_extractor = embedding_extractor self.plda = plda self.wav_suffix", "np.std(a.T[ii]) enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj]) s = c[ii][jj] test_scores.append((((s", "Author: <NAME> <<EMAIL>> # All Rights Reserved import os import", "None def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None,", "as f: for line in f: if len(line.split()) > 1:", "Returns: \"\"\" fns, kwargs = dargs ret = [] for", "with open(embedding_path, 'rb') as f: # append mean from speaker's", "out_emb_dir (str|None): path to directory for storing embeddings min_length (int):", "speaker's embeddings speaker_embeddings = pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle", "for speaker in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with", "def s_norm(self, test, enroll): \"\"\" Run speaker normalization (S-Norm) on", "have input rttm files for normalization.') self.features_extractor = features_extractor self.embedding_extractor", "features[start:end] for speaker in features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long", "None: self.embeddings = self.extract_embeddings() else: self.embeddings = self.load_embeddings() self.mean =", "if self.plda: a = self.plda.score(test, self.embeddings).T b = self.plda.score(enroll, self.embeddings).T", "get_frames_from_time(int(start_time)), 
get_frames_from_time(int(end_time)) if speaker not in features_dict: features_dict[speaker] = {}", "end_time)] = features[start:end] for speaker in features_dict: embedding_set = extract_embeddings(features_dict[speaker],", "cosine_similarity from vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings from", "self.out_emb_dir: for speaker in merged_speakers_dict: out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path))", "files in_emb_dir (str|None): path to directory with i-vectors out_emb_dir (str|None):", "c[ii][jj] test_scores.append((((s - test_mean) / test_std + (s - enroll_mean)", "os.path.abspath(out_emb_dir) self.min_length = min_length self.n_jobs = n_jobs if self.in_emb_dir is", "Technology FIT # Author: <NAME> <<EMAIL>> # All Rights Reserved", "`{}`.'.format(self.in_emb_dir)) for speaker in speakers: embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if", "1 features_dict[speaker][(start_time, end_time)] = features[start:end] for speaker in features_dict: embedding_set", "in speakers: embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker)) if os.path.isfile(embedding_path): logger.info('Loading normalization", "in_rttm_dir, rttm_suffix, min_length, n_jobs=1): \"\"\" Args: fns: speakers_dict: features_extractor: embedding_extractor:", "import cosine_similarity from vbdiar.features.segments import get_frames_from_time from vbdiar.embeddings.embedding import extract_embeddings", "* 1000), int(float(line.split()[4]) * 1000) speaker = line.split()[7] if dur", "normalization (S-Norm) on cached embeddings. 
Args: test (np.array): test embedding", "updated dictionary with speakers \"\"\" logger.info('Processing file `{}`.'.format(file_name.split()[0])) # extract", "speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix,", "= pickle.load(f) embeddings.append(np.mean(speaker_embeddings, axis=0)) else: logger.warning('No pickle file found for", "test, enroll): \"\"\" Run speaker normalization (S-Norm) on cached embeddings.", "len(self.embeddings) def extract_embeddings(self): \"\"\" Extract normalization embeddings using averaging. Returns:", "line = line.split()[0] else: line = line.replace(os.linesep, '') fns.append(line) speakers_dict", "= process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length,", "speakers_dict=speakers_dict, features_extractor=self.features_extractor, embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir, wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir, rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs) assert", "# number of speakers is defined file_name = file_name.split()[0] else:", "dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs", "embeddings_long), axis=0) return speakers_dict class Normalization(object): \"\"\" Speaker normalization S-Norm.", "self.in_emb_dir is None: self.embeddings = self.extract_embeddings() else: self.embeddings = self.load_embeddings()", "audio_dir=None, 
in_rttm_dir=None, in_emb_dir=None, out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None, plda=None, wav_suffix='.wav', rttm_suffix='.rttm',", "= line.split()[0] else: line = line.replace(os.linesep, '') fns.append(line) speakers_dict =", "{start}, size: {features.shape[0]}, end: {end}.' if end >= features.shape[0]: end", "storing embeddings min_length (int): minimal length for extracting embeddings features_extractor", "(s - enroll_mean) / enroll_std) / 2)) scores.append(test_scores) return np.array(scores)", "with i-vectors out_emb_dir (str|None): path to directory for storing embeddings", "range(test.shape[0]): test_scores = [] for jj in range(enroll.shape[0]): test_mean, test_std", "as f: # append mean from speaker's embeddings speaker_embeddings =", "Rights Reserved import os import logging import pickle import multiprocessing", "extract_embeddings from vbdiar.utils import mkdir_p from vbdiar.utils.utils import Utils logger", "rttm_suffix, min_length, n_jobs=1): \"\"\" Args: fns: speakers_dict: features_extractor: embedding_extractor: audio_dir:", "wav_suffix: in_rttm_dir: rttm_suffix: min_length: n_jobs: Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict,", "raise ValueError('It is required to have input rttm files for", "load_embeddings(self): \"\"\" Load normalization embeddings from pickle files. 
Returns: np.array:", "in features_dict: embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if", "with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp: for line in fp:", "self.embeddings).T b = self.plda.score(enroll, self.embeddings).T c = self.plda.score(enroll, test).T else:", "out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl') mkdir_p(os.path.dirname(out_path)) with open(out_path, 'wb') as f:", "Args: fns: speakers_dict: features_extractor: embedding_extractor: audio_dir: wav_suffix: in_rttm_dir: rttm_suffix: min_length:", "features_dict = {} with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f: for line", "for all defined speakers. Args: file_name (string_types): path to input", "min_length: n_jobs: Returns: \"\"\" kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor, embedding_extractor=embedding_extractor, audio_dir=audio_dir,", "embeddings. 
Args: test (np.array): test embedding enroll (np.array): enroll embedding", "self.embedding_extractor = embedding_extractor self.plda = plda self.wav_suffix = wav_suffix self.rttm_suffix", "file_name)}{rttm_suffix}') as f: for line in f: start_time, dur =", "open(self.norm_list) as f: for line in f: if len(line.split()) >", "import numpy as np from sklearn.metrics.pairwise import cosine_similarity from vbdiar.features.segments", "= extract_embeddings(features_dict[speaker], embedding_extractor) embeddings_long = embedding_set.get_all_embeddings() if speaker not in", "return len(self.embeddings) def extract_embeddings(self): \"\"\" Extract normalization embeddings using averaging.", "i-vectors out_emb_dir (str|None): path to directory for storing embeddings min_length", "wav_suffix=wav_suffix, in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length) if n_jobs == 1: ret =", "embedding_set.get_all_embeddings() if speaker not in speakers_dict.keys(): speakers_dict[speaker] = embeddings_long else:", "# Author: <NAME> <<EMAIL>> # All Rights Reserved import os", "axis=0) def __iter__(self): current = 0 while current < len(self.embeddings):", "embeddings using averaging. Returns: Tuple[np.array, np.array]: vectors for individual speakers,", "f: pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL) for speaker in merged_speakers_dict: merged_speakers_dict[speaker] =", "object wav_suffix (string_types): suffix of wav files rttm_suffix (string_types): suffix" ]
[ "lattice_dict['B'], label='Debris') ax = plt.gca() ax.set_title('Cell Populations over time (n", "= lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0] n =", "plot_dir): # total spaces on grid implies grid size total_cells", "lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'],", "over time (n = %d)' % n) ax.set_ylabel('Number of cells')", "time (n = %d)' % n) ax.set_ylabel('Number of cells') ax.set_xlabel('Time", "A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax", "+ lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1)", "ax = plt.gca() ax.set_title('Cell Populations over time (n = %d)'", "grid implies grid size total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] +", "plt import os def data_plotter(lattice_dict, datafile_dir, plot_dir): # total spaces", "spaces on grid implies grid size total_cells = lattice_dict['E'][0] +", "lattice_dict['E'], label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'],", "total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0] n", "lattice_dict['D_b'][0] + lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty", "data_plotter(lattice_dict, datafile_dir, plot_dir): # total spaces on grid implies grid", "grid size total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] +", "lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax = plt.gca()", "n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], 
label='Empty lattice points') plt.plot(lattice_dict['time'],", "8.0) # alternative: 20.0, 8.0 f.tight_layout() plt.savefig(os.path.join(plot_dir, 'population_vs_time.png')) plt.clf() return", "lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0] n = int(total_cells**0.5)", "= plt.gca() ax.set_title('Cell Populations over time (n = %d)' %", "import matplotlib.pyplot as plt import os def data_plotter(lattice_dict, datafile_dir, plot_dir):", "plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax = plt.gca() ax.set_title('Cell Populations over time", "matplotlib.pyplot as plt import os def data_plotter(lattice_dict, datafile_dir, plot_dir): #", "datafile_dir, plot_dir): # total spaces on grid implies grid size", "# total spaces on grid implies grid size total_cells =", "lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors", "label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax = plt.gca() ax.set_title('Cell", "<filename>agent_based_models/abm_allelopathy/plot_data.py import matplotlib.pyplot as plt import os def data_plotter(lattice_dict, datafile_dir,", "lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points')", "% n) ax.set_ylabel('Number of cells') ax.set_xlabel('Time (h)') plt.legend() f =", "label='Debris') ax = plt.gca() ax.set_title('Cell Populations over time (n =", "plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type", "B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax = plt.gca() ax.set_title('Cell Populations over", "as plt import os def data_plotter(lattice_dict, datafile_dir, plot_dir): # total", "(n = %d)' % n) ax.set_ylabel('Number of 
cells') ax.set_xlabel('Time (h)')", "of cells') ax.set_xlabel('Time (h)') plt.legend() f = plt.gcf() f.set_size_inches(20.0, 8.0)", "= plt.gcf() f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0 f.tight_layout() plt.savefig(os.path.join(plot_dir,", "plt.gcf() f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0 f.tight_layout() plt.savefig(os.path.join(plot_dir, 'population_vs_time.png'))", "label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'],", "lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'],", "points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type", "plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)')", "label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'],", "ax.set_ylabel('Number of cells') ax.set_xlabel('Time (h)') plt.legend() f = plt.gcf() f.set_size_inches(20.0,", "size total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0]", "n) ax.set_ylabel('Number of cells') ax.set_xlabel('Time (h)') plt.legend() f = plt.gcf()", "plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)')", "Populations over time (n = %d)' % n) ax.set_ylabel('Number of", "f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0 f.tight_layout() plt.savefig(os.path.join(plot_dir, 'population_vs_time.png')) plt.clf()", "total spaces on grid implies grid size total_cells = lattice_dict['E'][0]", "plt.gca() ax.set_title('Cell Populations over time 
(n = %d)' % n)", "+ lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice", "= int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'],", "%d)' % n) ax.set_ylabel('Number of cells') ax.set_xlabel('Time (h)') plt.legend() f", "ax.set_xlabel('Time (h)') plt.legend() f = plt.gcf() f.set_size_inches(20.0, 8.0) # alternative:", "= %d)' % n) ax.set_ylabel('Number of cells') ax.set_xlabel('Time (h)') plt.legend()", "cells') ax.set_xlabel('Time (h)') plt.legend() f = plt.gcf() f.set_size_inches(20.0, 8.0) #", "import os def data_plotter(lattice_dict, datafile_dir, plot_dir): # total spaces on", "(Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax = plt.gca() ax.set_title('Cell Populations", "os def data_plotter(lattice_dict, datafile_dir, plot_dir): # total spaces on grid", "on grid implies grid size total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0]", "(Type A)') plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris')", "f = plt.gcf() f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0 f.tight_layout()", "plt.legend() f = plt.gcf() f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0", "int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points') plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors", "(h)') plt.legend() f = plt.gcf() f.set_size_inches(20.0, 8.0) # alternative: 20.0,", "implies grid size total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0]", "ax.set_title('Cell Populations over time (n = %d)' % n) ax.set_ylabel('Number", "def data_plotter(lattice_dict, datafile_dir, plot_dir): # total spaces on grid implies", "+ lattice_dict['D_b'][0] + 
lattice_dict['B'][0] n = int(total_cells**0.5) plt.figure(1) plt.plot(lattice_dict['time'], lattice_dict['E'],", "plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)') plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris') ax =" ]
[ "{'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs): super(VirtualWanSecurityProviders, self).__init__(**kwargs)", "# regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VirtualWanSecurityProviders(Model):", "the MIT License. See License.txt in the project root for", ":type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = { 'supported_providers': {'key': 'supportedProviders',", "} def __init__(self, **kwargs): super(VirtualWanSecurityProviders, self).__init__(**kwargs) self.supported_providers = kwargs.get('supported_providers', None)", "the project root for # license information. # # Code", "Generator. # Changes may cause incorrect behavior and will be", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "license information. # # Code generated by Microsoft (R) AutoRest", "by Microsoft (R) AutoRest Code Generator. # Changes may cause", "Changes may cause incorrect behavior and will be lost if", "{ 'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs):", "behavior and will be lost if the code is #", "'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs): super(VirtualWanSecurityProviders, self).__init__(**kwargs) self.supported_providers", "incorrect behavior and will be lost if the code is", "MIT License. See License.txt in the project root for #", "AutoRest Code Generator. 
# Changes may cause incorrect behavior and", "\"\"\" _attribute_map = { 'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, }", "= { 'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def __init__(self,", ":param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = { 'supported_providers':", "may cause incorrect behavior and will be lost if the", "the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import", "\"\"\"Collection of SecurityProviders. :param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map", "project root for # license information. # # Code generated", "See License.txt in the project root for # license information.", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs): super(VirtualWanSecurityProviders,", "generated by Microsoft (R) AutoRest Code Generator. # Changes may", "in the project root for # license information. # #", "reserved. # Licensed under the MIT License. See License.txt in", "'type': '[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs): super(VirtualWanSecurityProviders, self).__init__(**kwargs) self.supported_providers =", "of SecurityProviders. :param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map =", "# # Code generated by Microsoft (R) AutoRest Code Generator.", "from msrest.serialization import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param", "Corporation. All rights reserved. 
# Licensed under the MIT License.", "# Licensed under the MIT License. See License.txt in the", "-------------------------------------------------------------------------- from msrest.serialization import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders.", "SecurityProviders. :param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = {", "supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = { 'supported_providers': {'key':", "msrest.serialization import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param supported_providers:", "# Changes may cause incorrect behavior and will be lost", "'[VirtualWanSecurityProvider]'}, } def __init__(self, **kwargs): super(VirtualWanSecurityProviders, self).__init__(**kwargs) self.supported_providers = kwargs.get('supported_providers',", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.", "Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "information. # # Code generated by Microsoft (R) AutoRest Code", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights", "regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection", "License. See License.txt in the project root for # license", "will be lost if the code is # regenerated. #", "lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from", "list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = { 'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},", "_attribute_map = { 'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'}, } def", "and will be lost if the code is # regenerated.", "is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class", "code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model", "import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param supported_providers: :type", "under the MIT License. See License.txt in the project root", "cause incorrect behavior and will be lost if the code", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. See", "# -------------------------------------------------------------------------- from msrest.serialization import Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of", "supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\" _attribute_map = { 'supported_providers': {'key': 'supportedProviders', 'type':", "Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect", "Model class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param supported_providers: :type supported_providers:", "root for # license information. # # Code generated by", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "Licensed under the MIT License. See License.txt in the project", "# Code generated by Microsoft (R) AutoRest Code Generator. #", "rights reserved. 
# Licensed under the MIT License. See License.txt", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All", "class VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider]", "License.txt in the project root for # license information. #", "# license information. # # Code generated by Microsoft (R)", "VirtualWanSecurityProviders(Model): \"\"\"Collection of SecurityProviders. :param supported_providers: :type supported_providers: list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider] \"\"\"", "Code Generator. # Changes may cause incorrect behavior and will", "be lost if the code is # regenerated. # --------------------------------------------------------------------------", "for # license information. # # Code generated by Microsoft", "if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization", "(R) AutoRest Code Generator. # Changes may cause incorrect behavior", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under" ]
[ ".exceptions import ObjectIsNotADate def format_date(value, format=\"%d %M %Y\"): regex =", "re from .exceptions import ObjectIsNotADate def format_date(value, format=\"%d %M %Y\"):", "def format_date(value, format=\"%d %M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if", "format_date(value, format=\"%d %M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex", "import datetime import re from .exceptions import ObjectIsNotADate def format_date(value,", "regex is not None: date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\")))", "from .exceptions import ObjectIsNotADate def format_date(value, format=\"%d %M %Y\"): regex", "%Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is not None:", "regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is not None: date", "date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\"))) else: raise ObjectIsNotADate return", "import re from .exceptions import ObjectIsNotADate def format_date(value, format=\"%d %M", "ObjectIsNotADate def format_date(value, format=\"%d %M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value)", "%M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is not", "None: date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\"))) else: raise ObjectIsNotADate", "= re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is not None: date =", "if regex is not None: date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")),", "import ObjectIsNotADate def format_date(value, format=\"%d %M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\",", "value) if 
regex is not None: date = datetime.date( int(regex.group(\"year\")),", "not None: date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\"))) else: raise", "format=\"%d %M %Y\"): regex = re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is", "= datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\"))) else: raise ObjectIsNotADate return date.strftime(format)", "is not None: date = datetime.date( int(regex.group(\"year\")), int(regex.group(\"month\")), int(regex.group(\"day\"))) else:", "re.match(r\"(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})\", value) if regex is not None: date = datetime.date(", "datetime import re from .exceptions import ObjectIsNotADate def format_date(value, format=\"%d" ]
[ "data :return: dict of (images, labels) :rtype: dict \"\"\" images", "of (images, labels) :rtype: dict \"\"\" images = DataLoader.test.images labels", "None or DataLoader.partial_dataset is not None: train_validation_split_point = int(DataLoader.train['images'].shape[0] *", "indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data = data[training_idx, :] images", "data = np.loadtxt(path) if DataLoader.mode is None: data = data[0:1000,", ":] elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0", "\"\"\" get training data :return: dict of (images, labels) :rtype:", "'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation =", "data[:, 0:-1] labels = data[:, -1] images = np.reshape(images, [images.shape[0],", "sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader: train =", ":rtype: dict \"\"\" images = DataLoader.train.images labels = DataLoader.train.labels return", "or DataLoader.partial_dataset is not None: train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)", "class DataLoader: train = None validation = None test =", "(images, labels) :rtype: dict \"\"\" images = DataLoader.train.images labels =", "not None and train_validation_split_point > 0: if DataLoader.mode is None", "dict of (images, labels) :rtype: dict \"\"\" images = DataLoader.train.images", "= None @staticmethod def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):", "@staticmethod def load_image_data_with_label_at_end(path, height, length): data = np.loadtxt(path) if DataLoader.mode", "splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point]", "-1] images = 
np.reshape(images, [images.shape[0], height, length, 1], order='F') return", "None: data = data[0:1000, :] elif DataLoader.partial_dataset is not None", "'images': images, 'labels': labels } @staticmethod def get_test_data(): \"\"\" get", "is None: data = data[0:1000, :] elif DataLoader.partial_dataset is not", "not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training data", "{ 'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation", "DataLoader.test.images labels = DataLoader.test.labels return { 'images': images, 'labels': labels", ":rtype: dict \"\"\" images = DataLoader.validation.images labels = DataLoader.validation.labels return", "= DataLoader.validation.images labels = DataLoader.validation.labels return { 'images': images, 'labels':", "np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data = data[training_idx, :] images = data[:,", "import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader: train = None", "length=length) elif train_validation_split_point is not None and train_validation_split_point > 0:", "train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8) splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point,", "'images': images, 'labels': labels } @staticmethod def get_validation_data(): \"\"\" get", "'labels': labels } @staticmethod def get_validation_data(): \"\"\" get validation data", "test_path), height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test", "dataset cut_point = int(data.shape[0] * DataLoader.partial_dataset) indices = 
np.random.permutation(data.shape[0]) training_idx=", ":] images = data[:, 0:-1] labels = data[:, -1] images", "= data[:, 0:-1] labels = data[:, -1] images = np.reshape(images,", "os.path.abspath('datasets') class DataLoader: train = None validation = None test", "DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data = data[training_idx, :]", "height=28, length=28, train_validation_split_point=10000): if train_path is not None: DataLoader.train =", "labels) :rtype: dict \"\"\" images = DataLoader.train.images labels = DataLoader.train.labels", "= None partial_dataset = None @staticmethod def load(train_path=None, validation_path=None, test_path=None,", "data[:, -1] images = np.reshape(images, [images.shape[0], height, length, 1], order='F')", "* DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data = data[training_idx,", "if test_path is not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height,", "@staticmethod def get_training_data(): \"\"\" get training data :return: dict of", "None @staticmethod def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000): if", "height, length): data = np.loadtxt(path) if DataLoader.mode is None: data", "validation_path), height=height, length=length) elif train_validation_split_point is not None and train_validation_split_point", "'images': DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train =", "images = DataLoader.validation.images labels = DataLoader.validation.labels return { 'images': images,", "images = np.reshape(images, [images.shape[0], height, length, 1], order='F') return {", ":, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } 
splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:,", "data[training_idx, :] images = data[:, 0:-1] labels = data[:, -1]", "None: train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8) splited_train = { 'images':", "load_image_data_with_label_at_end(path, height, length): data = np.loadtxt(path) if DataLoader.mode is None:", "= DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data", "None and train_validation_split_point > 0: if DataLoader.mode is None or", "splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:]", "labels = data[:, -1] images = np.reshape(images, [images.shape[0], height, length,", "\"\"\" images = DataLoader.train.images labels = DataLoader.train.labels return { 'images':", "0.8) splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels':", "} splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels':", "= DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length) if validation_path is not", "data[0:1000, :] elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset >", "'images': images, 'labels': labels } @staticmethod def load_image_data_with_label_at_end(path, height, length):", "DataLoader.validation = splited_validation if test_path is not None: DataLoader.test =", "labels } @staticmethod def load_image_data_with_label_at_end(path, height, length): data = np.loadtxt(path)", "0: if DataLoader.mode is None or DataLoader.partial_dataset is not None:", "= DataLoader.test.images labels = DataLoader.test.labels return 
{ 'images': images, 'labels':", "os import logging from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets')", "import numpy as np import os import logging from sklearn.model_selection", "height=height, length=length) elif train_validation_split_point is not None and train_validation_split_point >", "train_path), height=height, length=length) if validation_path is not None: DataLoader.validation =", "int(DataLoader.train['images'].shape[0] * 0.8) splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point, :, :,", "DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif train_validation_split_point is", "images = DataLoader.train.images labels = DataLoader.train.labels return { 'images': images,", "np.loadtxt(path) if DataLoader.mode is None: data = data[0:1000, :] elif", "int(data.shape[0] * DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data =", "None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1: # randomly", "= data[training_idx, :] images = data[:, 0:-1] labels = data[:,", "= None test = None mode = None partial_dataset =", "os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif train_validation_split_point is not None and", "is None or DataLoader.partial_dataset is not None: train_validation_split_point = int(DataLoader.train['images'].shape[0]", "validation_path is not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height,", "DataLoader: train = None validation = None test = None", "@staticmethod def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000): if train_path", "length=length) logging.debug('Training data 
shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))", "labels = DataLoader.test.labels return { 'images': images, 'labels': labels }", "= splited_validation if test_path is not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER,", "of (images, labels) :rtype: dict \"\"\" images = DataLoader.validation.images labels", "} DataLoader.train = splited_train DataLoader.validation = splited_validation if test_path is", "is not None and train_validation_split_point > 0: if DataLoader.mode is", "= data[0:1000, :] elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset", "data = data[training_idx, :] images = data[:, 0:-1] labels =", "DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif train_validation_split_point is not None", "DataLoader.train = splited_train DataLoader.validation = splited_validation if test_path is not", "= { 'images': DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] }", "= DataLoader.train.labels return { 'images': images, 'labels': labels } @staticmethod", "[images.shape[0], height, length, 1], order='F') return { 'images': images, 'labels':", "DataLoader @staticmethod def get_training_data(): \"\"\" get training data :return: dict", "and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1: # randomly pick", "DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation", "dict of (images, labels) :rtype: dict \"\"\" images = 
DataLoader.validation.images", "DataLoader.test.labels return { 'images': images, 'labels': labels } @staticmethod def", "dict \"\"\" images = DataLoader.test.images labels = DataLoader.test.labels return {", "height=height, length=length) if validation_path is not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end(", "} @staticmethod def load_image_data_with_label_at_end(path, height, length): data = np.loadtxt(path) if", "None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif train_validation_split_point", "get test data :return: dict of (images, labels) :rtype: dict", "is not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training", "not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1: #", "if validation_path is not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path),", ":, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train DataLoader.validation", "None validation = None test = None mode = None", "None test = None mode = None partial_dataset = None", "dict of (images, labels) :rtype: dict \"\"\" images = DataLoader.test.images", "is not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)", "= None validation = None test = None mode =", ":, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train DataLoader.validation =", "return { 'images': images, 'labels': labels } @staticmethod def get_test_data():", "None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, 
train_path), height=height, length=length) if validation_path", "logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def get_training_data(): \"\"\" get", "DataLoader.train['labels'][0:train_validation_split_point] } splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],", "= { 'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] }", "def get_test_data(): \"\"\" get test data :return: dict of (images,", "os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length) if validation_path is not None: DataLoader.validation", "partial dataset cut_point = int(data.shape[0] * DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0])", "is not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)", "} @staticmethod def get_test_data(): \"\"\" get test data :return: dict", "logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return", "> 0: if DataLoader.mode is None or DataLoader.partial_dataset is not", "logging from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader:", "height, length, 1], order='F') return { 'images': images, 'labels': labels", "DataLoader.partial_dataset <1: # randomly pick partial dataset cut_point = int(data.shape[0]", "test = None mode = None partial_dataset = None @staticmethod", "None partial_dataset = None @staticmethod def load(train_path=None, validation_path=None, test_path=None, height=28,", "length): data = np.loadtxt(path) if DataLoader.mode is None: 
data =", "splited_validation if test_path is not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path),", "= np.reshape(images, [images.shape[0], height, length, 1], order='F') return { 'images':", "* 0.8) splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],", "elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and", "= int(DataLoader.train['images'].shape[0] * 0.8) splited_train = { 'images': DataLoader.train['images'][0:train_validation_split_point, :,", "train_validation_split_point is not None and train_validation_split_point > 0: if DataLoader.mode", "0:-1] labels = data[:, -1] images = np.reshape(images, [images.shape[0], height,", "shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def get_training_data(): \"\"\"", "None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))", "get validation data :return: dict of (images, labels) :rtype: dict", "validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000): if train_path is not None:", ":], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train DataLoader.validation = splited_validation", "of (images, labels) :rtype: dict \"\"\" images = DataLoader.train.images labels", "{ 'images': images, 'labels': labels } @staticmethod def load_image_data_with_label_at_end(path, height,", "images, 'labels': labels } @staticmethod def load_image_data_with_label_at_end(path, height, length): data", "return { 'images': images, 'labels': labels } @staticmethod def get_validation_data():", "and DataLoader.partial_dataset <1: 
# randomly pick partial dataset cut_point =", "= DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif train_validation_split_point is not", "'labels': labels } @staticmethod def get_test_data(): \"\"\" get test data", "DataLoader.train.images labels = DataLoader.train.labels return { 'images': images, 'labels': labels", "DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train DataLoader.validation = splited_validation if test_path", "def get_training_data(): \"\"\" get training data :return: dict of (images,", "@staticmethod def get_validation_data(): \"\"\" get validation data :return: dict of", "= DataLoader.validation.labels return { 'images': images, 'labels': labels } @staticmethod", "train_validation_split_point > 0: if DataLoader.mode is None or DataLoader.partial_dataset is", "data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader", "labels) :rtype: dict \"\"\" images = DataLoader.validation.images labels = DataLoader.validation.labels", "images, 'labels': labels } @staticmethod def get_validation_data(): \"\"\" get validation", "0 and DataLoader.partial_dataset <1: # randomly pick partial dataset cut_point", "is not None: train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8) splited_train =", "'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:, :, :,", "DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data 
shape:{}'.format(str(DataLoader.validation['images'].shape)))", "and train_validation_split_point > 0: if DataLoader.mode is None or DataLoader.partial_dataset", "shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod", "splited_train DataLoader.validation = splited_validation if test_path is not None: DataLoader.test", "DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1: # randomly pick partial", "pick partial dataset cut_point = int(data.shape[0] * DataLoader.partial_dataset) indices =", "length, 1], order='F') return { 'images': images, 'labels': labels }", "DataLoader.train.labels return { 'images': images, 'labels': labels } @staticmethod def", "labels = DataLoader.train.labels return { 'images': images, 'labels': labels }", "\"\"\" get validation data :return: dict of (images, labels) :rtype:", "numpy as np import os import logging from sklearn.model_selection import", "} @staticmethod def get_validation_data(): \"\"\" get validation data :return: dict", "get_test_data(): \"\"\" get test data :return: dict of (images, labels)", "DataLoader.train['images'][0:train_validation_split_point, :, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation = {", "test_path is not None: DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)", "{ 'images': DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train", "= splited_train DataLoader.validation = splited_validation if test_path is not None:", ":rtype: dict \"\"\" images = DataLoader.test.images labels = DataLoader.test.labels return", "> 0 and DataLoader.partial_dataset <1: # 
randomly pick partial dataset", "def load_image_data_with_label_at_end(path, height, length): data = np.loadtxt(path) if DataLoader.mode is", "def get_validation_data(): \"\"\" get validation data :return: dict of (images,", "= DataLoader.test.labels return { 'images': images, 'labels': labels } @staticmethod", "DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader: train = None validation =", "height=height, length=length) logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape))) logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data", "(images, labels) :rtype: dict \"\"\" images = DataLoader.validation.images labels =", "= None mode = None partial_dataset = None @staticmethod def", "{ 'images': images, 'labels': labels } @staticmethod def get_test_data(): \"\"\"", "(images, labels) :rtype: dict \"\"\" images = DataLoader.test.images labels =", "data = data[0:1000, :] elif DataLoader.partial_dataset is not None and", "DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset", "not None: train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8) splited_train = {", "length=28, train_validation_split_point=10000): if train_path is not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end(", "if DataLoader.mode is None: data = data[0:1000, :] elif DataLoader.partial_dataset", ":, :, :], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation = { 'images':", "= int(data.shape[0] * DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data", "train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader: train = None validation", "= np.random.permutation(data.shape[0]) training_idx= indices[:cut_point] data = data[training_idx, :] images =", "data 
shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def get_training_data():", "logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape))) logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def", "from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class DataLoader: train", "= os.path.abspath('datasets') class DataLoader: train = None validation = None", "data shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def get_training_data(): \"\"\" get training", ":], 'labels': DataLoader.train['labels'][0:train_validation_split_point] } splited_validation = { 'images': DataLoader.train['images'][train_validation_split_point:, :,", "get_validation_data(): \"\"\" get validation data :return: dict of (images, labels)", "not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length) if", "not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length) elif", "shape:{}'.format(str(DataLoader.test['images'].shape))) return DataLoader @staticmethod def get_training_data(): \"\"\" get training data", "@staticmethod def get_test_data(): \"\"\" get test data :return: dict of", "images, 'labels': labels } @staticmethod def get_test_data(): \"\"\" get test", "DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length) if validation_path is", "<1: # randomly pick partial dataset cut_point = int(data.shape[0] *", "test data :return: dict of (images, labels) :rtype: dict \"\"\"", "get training data :return: dict of 
(images, labels) :rtype: dict", "DataLoader.mode is None: data = data[0:1000, :] elif DataLoader.partial_dataset is", "labels } @staticmethod def get_validation_data(): \"\"\" get validation data :return:", "dict \"\"\" images = DataLoader.train.images labels = DataLoader.train.labels return {", "return DataLoader @staticmethod def get_training_data(): \"\"\" get training data :return:", "labels } @staticmethod def get_test_data(): \"\"\" get test data :return:", "as np import os import logging from sklearn.model_selection import train_test_split", "partial_dataset = None @staticmethod def load(train_path=None, validation_path=None, test_path=None, height=28, length=28,", "np import os import logging from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER", "get_training_data(): \"\"\" get training data :return: dict of (images, labels)", "\"\"\" images = DataLoader.validation.images labels = DataLoader.validation.labels return { 'images':", "DataLoader.validation.images labels = DataLoader.validation.labels return { 'images': images, 'labels': labels", "\"\"\" images = DataLoader.test.images labels = DataLoader.test.labels return { 'images':", "labels) :rtype: dict \"\"\" images = DataLoader.test.images labels = DataLoader.test.labels", "mode = None partial_dataset = None @staticmethod def load(train_path=None, validation_path=None,", "'labels': labels } @staticmethod def load_image_data_with_label_at_end(path, height, length): data =", "DataLoader.partial_dataset is not None: train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8) splited_train", "load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000): if train_path is not", "# randomly pick partial dataset cut_point = int(data.shape[0] * DataLoader.partial_dataset)", "import os import logging from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER =", "images = DataLoader.test.images labels = 
DataLoader.test.labels return { 'images': images,", "dict \"\"\" images = DataLoader.validation.images labels = DataLoader.validation.labels return {", "training data :return: dict of (images, labels) :rtype: dict \"\"\"", "validation data :return: dict of (images, labels) :rtype: dict \"\"\"", "test_path=None, height=28, length=28, train_validation_split_point=10000): if train_path is not None: DataLoader.train", "train = None validation = None test = None mode", "'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train DataLoader.validation = splited_validation if", "DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length) if validation_path is not None:", "if train_path is not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path),", "cut_point = int(data.shape[0] * DataLoader.partial_dataset) indices = np.random.permutation(data.shape[0]) training_idx= indices[:cut_point]", "return { 'images': images, 'labels': labels } @staticmethod def load_image_data_with_label_at_end(path,", "images = data[:, 0:-1] labels = data[:, -1] images =", "\"\"\" get test data :return: dict of (images, labels) :rtype:", "np.reshape(images, [images.shape[0], height, length, 1], order='F') return { 'images': images,", "= DataLoader.train.images labels = DataLoader.train.labels return { 'images': images, 'labels':", "validation = None test = None mode = None partial_dataset", "elif train_validation_split_point is not None and train_validation_split_point > 0: if", "DataLoader.validation.labels return { 'images': images, 'labels': labels } @staticmethod def", "= np.loadtxt(path) if DataLoader.mode is None: data = data[0:1000, :]", "{ 'images': images, 'labels': labels } @staticmethod def get_validation_data(): \"\"\"", "None mode = None partial_dataset = None @staticmethod def load(train_path=None,", 
"train_validation_split_point=10000): if train_path is not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER,", "length=length) if validation_path is not None: DataLoader.validation = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER,", "= data[:, -1] images = np.reshape(images, [images.shape[0], height, length, 1],", "is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1:", "training_idx= indices[:cut_point] data = data[training_idx, :] images = data[:, 0:-1]", "indices[:cut_point] data = data[training_idx, :] images = data[:, 0:-1] labels", "DataLoader.mode is None or DataLoader.partial_dataset is not None: train_validation_split_point =", "labels = DataLoader.validation.labels return { 'images': images, 'labels': labels }", "def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000): if train_path is", "import logging from sklearn.model_selection import train_test_split DATASET_ROOT_FOLDER = os.path.abspath('datasets') class", ":return: dict of (images, labels) :rtype: dict \"\"\" images =", "train_path is not None: DataLoader.train = DataLoader.load_image_data_with_label_at_end( os.path.join(DATASET_ROOT_FOLDER, train_path), height=height,", "DataLoader.train['images'][train_validation_split_point:, :, :, :], 'labels': DataLoader.train['labels'][train_validation_split_point:] } DataLoader.train = splited_train", "randomly pick partial dataset cut_point = int(data.shape[0] * DataLoader.partial_dataset) indices", "if DataLoader.mode is None or DataLoader.partial_dataset is not None: train_validation_split_point" ]
[ "sexo de 4 pessoas. No final do programa, mostre: a", "anos. mediaidade = '' nomelista = [] idadelista = []", "nomelista = [] idadelista = [] sexolista = [] homens", "'' nomelista = [] idadelista = [] sexolista = []", "idade < 20: nomedelas.append(nome) mulherescommenosde20 += 1 elif sexo ==", "é: {mediaidade}') print(f'A pessoa que tem a maior idade, com", "de idade do grupo, qual é o nome do homem", "mediaidade = ((sum(idadelista))/4) # Adcionei todos os nomes em uma", "{mediaidade}') print(f'A pessoa que tem a maior idade, com {maximo}", "Armazenei em indexnome a posição de quem tem a maior", "# ------------------------------------------------------------------- print(f'A media das idades é: {mediaidade}') print(f'A pessoa", "de quem tem a maior idade indexnome = nomelista[indexidade] #", "nome: ')) idade = int(input('Sua idade: ')) sexo = int(input('Sexo?", "< 20: nomedelas.append(nome) mulherescommenosde20 += 1 elif sexo == 0:", "# Armazenei em idadexidade o INDEX do maior valor indexidade", "print(f'As mulheres que possuem menos de 20 anos: {mulherescommenosde20} e", "range(1,5): print(f'{i} PESSOA') nome = (input('Seu nome: ')) idade =", "No final do programa, mostre: a média de idade do", "tem a maior idade indexnome = nomelista[indexidade] # ------------------------------------------------------------------- print(f'A", "mais velho e quantas mulheres têm menos de 20 anos.", "print(f'A media das idades é: {mediaidade}') print(f'A pessoa que tem", "sexo = int(input('Sexo? [0]Masculino [1]Feminino: ')) if sexo == 1", "20 anos. mediaidade = '' nomelista = [] idadelista =", "int(input('Sua idade: ')) sexo = int(input('Sexo? 
[0]Masculino [1]Feminino: ')) if", "= [] sexolista = [] homens = [] mulherescommenosde20 =", "0 nomedelas = [] # ------------------------------------------------------------------- for i in range(1,5):", "os nomes em uma lista nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei", "lista maximo = max(idadelista) # Armazenei em idadexidade o INDEX", "de 4 pessoas. No final do programa, mostre: a média", "menos de 20 anos. mediaidade = '' nomelista = []", "------------------------------------------------------------------- for i in range(1,5): print(f'{i} PESSOA') nome = (input('Seu", "idade indexnome = nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media das idades", "# Exercício Python 56: Desenvolva um programa que leia o", "= int(input('Sua idade: ')) sexo = int(input('Sexo? [0]Masculino [1]Feminino: '))", "nomes em uma lista nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei em", "nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media das idades é: {mediaidade}') print(f'A", "que tem a maior idade, com {maximo} é essa: {indexnome}')", "(input('Seu nome: ')) idade = int(input('Sua idade: ')) sexo =", "')) if sexo == 1 and idade < 20: nomedelas.append(nome)", "é o nome do homem mais velho e quantas mulheres", "nome, idade e sexo de 4 pessoas. No final do", "== 0: homens.append(nome) # Adcionei todas idades em uma lista", "qual é o nome do homem mais velho e quantas", "o nome do homem mais velho e quantas mulheres têm", "sexo == 1 and idade < 20: nomedelas.append(nome) mulherescommenosde20 +=", "velho e quantas mulheres têm menos de 20 anos. 
mediaidade", "do maior valor indexidade = idadelista.index(maximo) # Armazenei em indexnome", "== 1 and idade < 20: nomedelas.append(nome) mulherescommenosde20 += 1", "# Adcionei todas idades em uma lista idadelista.append(idade) # Tirei", "print(f'{i} PESSOA') nome = (input('Seu nome: ')) idade = int(input('Sua", "idadelista = [] sexolista = [] homens = [] mulherescommenosde20", "grupo, qual é o nome do homem mais velho e", "mulherescommenosde20 += 1 elif sexo == 0: homens.append(nome) # Adcionei", "idades é: {mediaidade}') print(f'A pessoa que tem a maior idade,", "Exercício Python 56: Desenvolva um programa que leia o nome,", "max(idadelista) # Armazenei em idadexidade o INDEX do maior valor", "o maior valor encontrado dentro de uma lista maximo =", "parte mediaidade = ((sum(idadelista))/4) # Adcionei todos os nomes em", "{maximo} é essa: {indexnome}') print(f'As mulheres que possuem menos de", "0: homens.append(nome) # Adcionei todas idades em uma lista idadelista.append(idade)", "# Tirei a média dessas idades //Primeira parte mediaidade =", "print(f'A pessoa que tem a maior idade, com {maximo} é", "Tirei a média dessas idades //Primeira parte mediaidade = ((sum(idadelista))/4)", "1 elif sexo == 0: homens.append(nome) # Adcionei todas idades", "o nome, idade e sexo de 4 pessoas. No final", "idade: ')) sexo = int(input('Sexo? [0]Masculino [1]Feminino: ')) if sexo", "Adcionei todos os nomes em uma lista nomelista.append(nome) # -------------------------------------------------------------------", "indexnome = nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media das idades é:", "e sexo de 4 pessoas. 
No final do programa, mostre:", "= ((sum(idadelista))/4) # Adcionei todos os nomes em uma lista", "[] homens = [] mulherescommenosde20 = 0 nomedelas = []", "nome do homem mais velho e quantas mulheres têm menos", "encontrado dentro de uma lista maximo = max(idadelista) # Armazenei", "sexo == 0: homens.append(nome) # Adcionei todas idades em uma", "uma lista idadelista.append(idade) # Tirei a média dessas idades //Primeira", "Python 56: Desenvolva um programa que leia o nome, idade", "com {maximo} é essa: {indexnome}') print(f'As mulheres que possuem menos", "= '' nomelista = [] idadelista = [] sexolista =", "in range(1,5): print(f'{i} PESSOA') nome = (input('Seu nome: ')) idade", "+= 1 elif sexo == 0: homens.append(nome) # Adcionei todas", "leia o nome, idade e sexo de 4 pessoas. No", "pessoa que tem a maior idade, com {maximo} é essa:", "idadelista.index(maximo) # Armazenei em indexnome a posição de quem tem", "das idades é: {mediaidade}') print(f'A pessoa que tem a maior", "pessoas. No final do programa, mostre: a média de idade", "a maior idade, com {maximo} é essa: {indexnome}') print(f'As mulheres", "lista nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei em maximo o maior", "PESSOA') nome = (input('Seu nome: ')) idade = int(input('Sua idade:", "sexolista = [] homens = [] mulherescommenosde20 = 0 nomedelas", "dentro de uma lista maximo = max(idadelista) # Armazenei em", "mulherescommenosde20 = 0 nomedelas = [] # ------------------------------------------------------------------- for i", "o INDEX do maior valor indexidade = idadelista.index(maximo) # Armazenei", "i in range(1,5): print(f'{i} PESSOA') nome = (input('Seu nome: '))", "maximo = max(idadelista) # Armazenei em idadexidade o INDEX do", "idadexidade o INDEX do maior valor indexidade = idadelista.index(maximo) #", "média de idade do grupo, qual é o nome do", "lista idadelista.append(idade) # Tirei a média dessas idades //Primeira parte", "elif sexo 
== 0: homens.append(nome) # Adcionei todas idades em", "e quantas mulheres têm menos de 20 anos. mediaidade =", "{indexnome}') print(f'As mulheres que possuem menos de 20 anos: {mulherescommenosde20}", "1 and idade < 20: nomedelas.append(nome) mulherescommenosde20 += 1 elif", "<filename>FOR/Analisador-completo/main.py<gh_stars>1-10 # Exercício Python 56: Desenvolva um programa que leia", "Armazenei em maximo o maior valor encontrado dentro de uma", "em maximo o maior valor encontrado dentro de uma lista", "int(input('Sexo? [0]Masculino [1]Feminino: ')) if sexo == 1 and idade", "a média dessas idades //Primeira parte mediaidade = ((sum(idadelista))/4) #", "------------------------------------------------------------------- print(f'A media das idades é: {mediaidade}') print(f'A pessoa que", "programa, mostre: a média de idade do grupo, qual é", "media das idades é: {mediaidade}') print(f'A pessoa que tem a", "# ------------------------------------------------------------------- # Armazenei em maximo o maior valor encontrado", "20: nomedelas.append(nome) mulherescommenosde20 += 1 elif sexo == 0: homens.append(nome)", "Armazenei em idadexidade o INDEX do maior valor indexidade =", "if sexo == 1 and idade < 20: nomedelas.append(nome) mulherescommenosde20", "[] sexolista = [] homens = [] mulherescommenosde20 = 0", "= max(idadelista) # Armazenei em idadexidade o INDEX do maior", "em uma lista idadelista.append(idade) # Tirei a média dessas idades", "a posição de quem tem a maior idade indexnome =", "[0]Masculino [1]Feminino: ')) if sexo == 1 and idade <", "for i in range(1,5): print(f'{i} PESSOA') nome = (input('Seu nome:", "que leia o nome, idade e sexo de 4 pessoas.", "todas idades em uma lista idadelista.append(idade) # Tirei a média", "= [] homens = [] mulherescommenosde20 = 0 nomedelas =", "valor encontrado dentro de uma lista maximo = max(idadelista) #", "indexnome a posição de quem tem a maior idade indexnome", "quantas mulheres têm menos de 20 anos. 
mediaidade = ''", "------------------------------------------------------------------- # Armazenei em maximo o maior valor encontrado dentro", "nome = (input('Seu nome: ')) idade = int(input('Sua idade: '))", "and idade < 20: nomedelas.append(nome) mulherescommenosde20 += 1 elif sexo", "nomedelas.append(nome) mulherescommenosde20 += 1 elif sexo == 0: homens.append(nome) #", "idades //Primeira parte mediaidade = ((sum(idadelista))/4) # Adcionei todos os", "maior valor encontrado dentro de uma lista maximo = max(idadelista)", "maior valor indexidade = idadelista.index(maximo) # Armazenei em indexnome a", "um programa que leia o nome, idade e sexo de", "a média de idade do grupo, qual é o nome", "uma lista nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei em maximo o", "a maior idade indexnome = nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media", "# Adcionei todos os nomes em uma lista nomelista.append(nome) #", "= [] mulherescommenosde20 = 0 nomedelas = [] # -------------------------------------------------------------------", "maior idade indexnome = nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media das", "= 0 nomedelas = [] # ------------------------------------------------------------------- for i in", "')) sexo = int(input('Sexo? [0]Masculino [1]Feminino: ')) if sexo ==", "= nomelista[indexidade] # ------------------------------------------------------------------- print(f'A media das idades é: {mediaidade}')", "idade = int(input('Sua idade: ')) sexo = int(input('Sexo? 
[0]Masculino [1]Feminino:", "dessas idades //Primeira parte mediaidade = ((sum(idadelista))/4) # Adcionei todos", "em uma lista nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei em maximo", "= [] # ------------------------------------------------------------------- for i in range(1,5): print(f'{i} PESSOA')", "programa que leia o nome, idade e sexo de 4", "idadelista.append(idade) # Tirei a média dessas idades //Primeira parte mediaidade", "mediaidade = '' nomelista = [] idadelista = [] sexolista", "maximo o maior valor encontrado dentro de uma lista maximo", "# Armazenei em indexnome a posição de quem tem a", "que possuem menos de 20 anos: {mulherescommenosde20} e são: {nomedelas}')", "média dessas idades //Primeira parte mediaidade = ((sum(idadelista))/4) # Adcionei", "[] idadelista = [] sexolista = [] homens = []", "= [] idadelista = [] sexolista = [] homens =", "= int(input('Sexo? [0]Masculino [1]Feminino: ')) if sexo == 1 and", "Adcionei todas idades em uma lista idadelista.append(idade) # Tirei a", "de uma lista maximo = max(idadelista) # Armazenei em idadexidade", "//Primeira parte mediaidade = ((sum(idadelista))/4) # Adcionei todos os nomes", "em idadexidade o INDEX do maior valor indexidade = idadelista.index(maximo)", "tem a maior idade, com {maximo} é essa: {indexnome}') print(f'As", "# Armazenei em maximo o maior valor encontrado dentro de", "idade, com {maximo} é essa: {indexnome}') print(f'As mulheres que possuem", "INDEX do maior valor indexidade = idadelista.index(maximo) # Armazenei em", "do homem mais velho e quantas mulheres têm menos de", "indexidade = idadelista.index(maximo) # Armazenei em indexnome a posição de", "mulheres que possuem menos de 20 anos: {mulherescommenosde20} e são:", "têm menos de 20 anos. 
mediaidade = '' nomelista =", "# ------------------------------------------------------------------- for i in range(1,5): print(f'{i} PESSOA') nome =", "nomelista.append(nome) # ------------------------------------------------------------------- # Armazenei em maximo o maior valor", "[1]Feminino: ')) if sexo == 1 and idade < 20:", "em indexnome a posição de quem tem a maior idade", "valor indexidade = idadelista.index(maximo) # Armazenei em indexnome a posição", "essa: {indexnome}') print(f'As mulheres que possuem menos de 20 anos:", "do grupo, qual é o nome do homem mais velho", "de 20 anos. mediaidade = '' nomelista = [] idadelista", "idade e sexo de 4 pessoas. No final do programa,", "[] mulherescommenosde20 = 0 nomedelas = [] # ------------------------------------------------------------------- for", "nomedelas = [] # ------------------------------------------------------------------- for i in range(1,5): print(f'{i}", "[] # ------------------------------------------------------------------- for i in range(1,5): print(f'{i} PESSOA') nome", "do programa, mostre: a média de idade do grupo, qual", "mostre: a média de idade do grupo, qual é o", "Desenvolva um programa que leia o nome, idade e sexo", "= idadelista.index(maximo) # Armazenei em indexnome a posição de quem", "homens = [] mulherescommenosde20 = 0 nomedelas = [] #", "homens.append(nome) # Adcionei todas idades em uma lista idadelista.append(idade) #", "idades em uma lista idadelista.append(idade) # Tirei a média dessas", "posição de quem tem a maior idade indexnome = nomelista[indexidade]", "4 pessoas. No final do programa, mostre: a média de", "maior idade, com {maximo} é essa: {indexnome}') print(f'As mulheres que", "mulheres têm menos de 20 anos. 
mediaidade = '' nomelista", "todos os nomes em uma lista nomelista.append(nome) # ------------------------------------------------------------------- #", "= (input('Seu nome: ')) idade = int(input('Sua idade: ')) sexo", "')) idade = int(input('Sua idade: ')) sexo = int(input('Sexo? [0]Masculino", "56: Desenvolva um programa que leia o nome, idade e", "((sum(idadelista))/4) # Adcionei todos os nomes em uma lista nomelista.append(nome)", "uma lista maximo = max(idadelista) # Armazenei em idadexidade o", "homem mais velho e quantas mulheres têm menos de 20", "é essa: {indexnome}') print(f'As mulheres que possuem menos de 20", "idade do grupo, qual é o nome do homem mais", "final do programa, mostre: a média de idade do grupo,", "quem tem a maior idade indexnome = nomelista[indexidade] # -------------------------------------------------------------------" ]
[ "qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))", "def test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4))", "dot method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8) mat_a", "qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import HGate, CHGate,", "in the root directory # of this source tree or", "method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX = np.dot(self.UY, self.UX) self.assertEqual(opYX,", "np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op", "cr = ClassicalRegister(2) circ = QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr,", "2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(),", "op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem dot method.\"\"\" # 3-qubit", "= self.rand_matrix(4, 4) mat2 = self.rand_matrix(4, 4) op1 = Operator(mat1)", "* mati) uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j *", "Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem front compose method.\"\"\" # 3-qubit", "= np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]),", "import HGate, CHGate, CXGate from qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator", "* rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return a unitary circuit and", "psi.conj()) rho /= np.trace(rho) return rho @classmethod def rand_matrix(cls, rows,", "directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
#", "self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\" #", "measure raises exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def", "Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1 = self.UX mat2", "2) op = Operator(mat) op1 = Operator(mat_a) op2 = Operator(np.kron(mat_b,", "np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with seed=%s\", seed) rng =", "np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op @ op2([0,", "self.rand_matrix(4, 4) op = Operator(mat) self.assertEqual(-op, Operator(-1 * mat)) def", "decomposition of Controlled-u1 gate lam = np.pi / 4 circuit", "Apache License, Version 2.0. You may # obtain a copy", "= QuantumRegister(2) cr = ClassicalRegister(2) circ = QuantumCircuit(qr, cr) circ.h(qr[0])", "__eq__ method\"\"\" mat = self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat))", "1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2,", "self.assertTrue(global_phase_equivalent) gate = CHGate() op = Operator(gate).data had = HGate().to_matrix()", "CHGate, CXGate from qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator import Operator", "Pauli-matrix unitaries UI = np.eye(2) UX = np.array([[0, 1], [1,", "input_dims=5) def test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\" op1 = Operator(self.rand_matrix(4,", "self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5) def", "may # obtain a copy of this license in the", "np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op @ op2([2,", "np.kron(mat_a, np.eye(2)))) 
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ)) #", "op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2,", "mat_c = self.rand_matrix(2, 2) op = Operator(mat) op1 = Operator(mat_a)", "= Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError,", "import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import HGate, CHGate, CXGate", "np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) # op1 qargs=[2] targ =", "# obtain a copy of this license in the LICENSE.txt", "self.assertTrue(clone == orig) def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" # X-90", "self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ)) # op1 qargs=[1]", "array.\"\"\" mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])", "rho @classmethod def rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return a random", "= np.array([[0, 1], [1, 0]]) UY = np.array([[0, -1j], [1j,", "* mat))) self.assertFalse(op.equiv(2 * mat)) if __name__ == '__main__': unittest.main()", "self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self): \"\"\"Test Operator copy method\"\"\" mat", "1, 0]), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(mat,", "they have been altered from the originals. 
# pylint: disable=invalid-name", "Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2, targ) targ =", "Operator input_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 * 4,", "np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op @", "4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat,", "np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ)) # op1", "self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4,", "method.\"\"\" op1 = Operator(self.UY) op2 = Operator(self.UX) targ = Operator(np.dot(self.UY,", "* mati.T)) def test_compose_except(self): \"\"\"Test compose different dimension exception\"\"\" self.assertRaises(QiskitError,", "# op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1,", "self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test initialization from circuit with measure raises", "qiskit.extensions.standard import HGate, CHGate, CXGate from qiskit.test import QiskitTestCase from", "np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim,", "target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of controlled-H gate circuit", "\"\"\"Test Operator input_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 *", "representation string property.\"\"\" mat = self.rand_matrix(2, 2) op = Operator(mat)", "1]), Operator(targ)) self.assertEqual(op * op2([0, 1]), Operator(targ)) # op2 qargs=[2,", "Operator(matr + 1j * mati) uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T", "(2, 2, 2)) op = 
Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data, mat)", "return false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator", "2]), Operator(targ)) # op3 qargs=[2, 1, 0] targ = np.dot(mat,", "op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization from a", "class TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear operator class.\"\"\" def test_init_array_qubit(self):", "different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def", "0] = 0.0 self.assertFalse(cpy == orig) with self.subTest(\"Shallow copy\"): orig", "a notice indicating # that they have been altered from", "mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op * op2([0, 1]), Operator(targ))", "copyright notice, and modified files need to carry a notice", "self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) def test_init_array(self):", "targ) def test_dot(self): \"\"\"Test dot method.\"\"\" op1 = Operator(self.UY) op2", "front=True), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a,", "Operator(matr + 1j * mati) uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr", "front=True), Operator(targ)) # op3 qargs=[2, 1, 0] targ = np.dot(mat,", "or derivative works of this code must retain this #", "global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition", "op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self): \"\"\"Test", "0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1,", "op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, 
np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))", "Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1,", "self.assertRaises(QiskitError, Operator, mat, input_dims=5) def test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\"", "\"\"\"Test Operator reshape method.\"\"\" op = Operator(self.rand_matrix(8, 8)) reshaped1 =", "def test_init_array(self): \"\"\"Test initialization from array.\"\"\" mat = np.eye(3) op", "mati) uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))", "Operator, mat, input_dims=5) def test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\" op1", "front=True), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4)))", "gate lam = np.pi / 4 circuit = QuantumCircuit(2) circuit.cu1(lam,", "(4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2],", "def test_adjoint(self): \"\"\"Test adjoint method.\"\"\" matr = self.rand_matrix(2, 4, real=True)", "op1 = Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator() self.assertEqual(op1, op2) def", "that they have been altered from the originals. 
# pylint:", "self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat,", "Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ)) # op1 qargs=[2] targ =", "modifications or derivative works of this code must retain this", "Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))", "3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2,", "real=True) mati = self.rand_matrix(2, 4, real=True) op = Operator(matr +", "Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3 qargs=[0,", "if cols is None: cols = rows if real: return", "psi = rng.rand(n) + 1j * rng.rand(n) rho = np.outer(psi,", "UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2) @classmethod def", "op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2,", "op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4)))", "front=True), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2),", "import scipy.linalg as la from qiskit import QiskitError from qiskit", "front=True), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a))", "2)) op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self):", "= QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2, qr[2]) y90 =", "op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem front compose method.\"\"\" #", "mat21 = np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6))", "exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3)) # Non-integer power raises error", "method.\"\"\" op = 
Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) reshaped2", "test_copy(self): \"\"\"Test Operator copy method\"\"\" mat = np.eye(2) with self.subTest(\"Deep", "4) val = np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val *", "np.random.RandomState(seed) if cols is None: cols = rows if real:", "op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self):", "op2 qargs=[2, 0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2,", "4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]),", "raises error self.assertRaises(QiskitError, op.power, 0.5) def test_add(self): \"\"\"Test add method.\"\"\"", "def test_compose(self): \"\"\"Test compose method.\"\"\" op1 = Operator(self.UX) op2 =", "= np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2))", "2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(), (2, 2,", "(5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test Operator", "= op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(),", "self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op @ op2([0, 1]), Operator(targ)) #", "np.pi / 4) op = Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase", "* op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0, 1] targ", "You may # obtain a copy of this license in", "def test_power_except(self): \"\"\"Test power method raises exceptions.\"\"\" op = Operator(self.rand_matrix(3,", "from array.\"\"\" mat = np.eye(3) op = Operator(mat) 
assert_allclose(op.data, mat)", "= CXGate() op = Operator(gate).data target = gate.to_matrix() global_phase_equivalent =", "carry a notice indicating # that they have been altered", "0]), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b,", "seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with seed=%s\", seed)", "targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def", "unitaries UI = np.eye(2) UX = np.array([[0, 1], [1, 0]])", "Test decomposition of Controlled-u1 gate lam = np.pi / 4", "mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) # op1 qargs=[1] targ =", "(3,)) mat = self.rand_matrix(2 * 3 * 4, 4 *", "= gate.to_matrix() global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate =", "0.0 self.assertFalse(cpy == orig) with self.subTest(\"Shallow copy\"): orig = Operator(mat)", "real=True) op = Operator(matr + 1j * mati) uni_adj =", "\"\"\"Test initialization from Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 =", "QiskitError from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import", "seed=%s\", seed) rng = np.random.RandomState(seed) psi = rng.rand(n) + 1j", "None: cols = rows if real: return rng.rand(rows, cols) return", "circuit and the corresponding unitary array.\"\"\" qr = QuantumRegister(3) circ", "Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)),", "mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op * op2([2, 0]), Operator(targ))", "method.\"\"\" op1 = Operator(self.UX) op2 = Operator(self.UY) targ = Operator(np.dot(self.UY,", "* mat)) def test_multiply_except(self): \"\"\"Test multiply method raises exceptions.\"\"\" op", "the Apache License, Version 2.0. 
You may # obtain a", "Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem front", "of qubit subsystems mat = self.rand_matrix(8, 8) op = Operator(mat)", "mat2)) self.assertEqual(op1 + op2, Operator(mat1 + mat2)) self.assertEqual(op1 - op2,", "(6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test power method raises", "def test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\" # 3-qubit operator mat", "circuit.x(1) circuit.ry(np.pi / 2, 2) op = Operator(circuit) y90 =", "retain this # copyright notice, and modified files need to", "op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),", "numpy as np from numpy.testing import assert_allclose import scipy.linalg as", "np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ)) # op2 qargs=[2,", "5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4,", "test_add_except(self): \"\"\"Test add method raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2))", "np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def test_power(self): \"\"\"Test power", "mat_a))) # op3 qargs=[0, 1, 2] targ = np.dot(np.kron(mat_c, np.kron(mat_b,", "Operator reshape method.\"\"\" op = Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8],", "/= np.trace(rho) return rho @classmethod def rand_matrix(cls, rows, cols=None, real=False):", "with seed=%s\", seed) rng = np.random.RandomState(seed) psi = rng.rand(n) +", "self.assertTrue(global_phase_equivalent) # Test decomposition of Controlled-u1 gate lam = np.pi", "self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2, 4, real=True) op =", "= np.eye(2) with self.subTest(\"Deep copy\"): orig = Operator(mat) cpy =", "mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat, 
input_dims=[4, 2]) self.assertRaises(QiskitError,", "rng.rand(n) rho = np.outer(psi, psi.conj()) rho /= np.trace(rho) return rho", "QuantumCircuit(2) circuit.ch(0, 1) op = Operator(circuit) target = np.kron(self.UI, np.diag([1,", "\"\"\"Test subsystem compose method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8,", "op1 = Operator(self.UY) op2 = Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX))", "= Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3", "input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4)) def test_input_dims(self): \"\"\"Test Operator", "self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def", "Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\" # 3-qubit operator", "# (C) Copyright IBM 2017, 2019. # # This code", "Operator(targ)) # op1 qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)", "matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of Controlled-u1", "self.UH, np.diag([0, 1])) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent)", "-1j], [1j, 0]]) UZ = np.diag([1, -1]) UH = np.array([[1,", "self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1, targ) def test_compose_front(self): \"\"\"Test front", "need to carry a notice indicating # that they have", "circuit = QuantumCircuit(2) circuit.cu1(lam, 0, 1) op = Operator(circuit) target", "1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1],", "self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2 * 3 * 4, 4", "1], [1, -1]]) / np.sqrt(2) @classmethod def rand_rho(cls, n): \"\"\"Return", "op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) 
self.assertEqual(op.compose(op1, qargs=[2], front=True),", "initialization from N-qubit array.\"\"\" # Test automatic inference of qubit", "self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2,", "op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2,", "# op1 qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]),", "utf-8 -*- # This code is part of Qiskit. #", "la from qiskit import QiskitError from qiskit import QuantumRegister, ClassicalRegister,", "self.assertEqual(op @ op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ", "2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1,", "+ np.kron( np.eye(2), np.diag([1, 0])) global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)", "from Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = Operator(op1) self.assertEqual(op1,", "np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ))", "QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import HGate, CHGate, CXGate from", "op = Operator(matr + 1j * mati) uni_adj = op.adjoint()", "np.outer(psi, psi.conj()) rho /= np.trace(rho) return rho @classmethod def rand_matrix(cls,", "2)) def test_reshape(self): \"\"\"Test Operator reshape method.\"\"\" op = Operator(self.rand_matrix(8,", "np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op", "output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2))", "rows, cols=None, real=False): \"\"\"Return a random matrix.\"\"\" seed = np.random.randint(0,", "/ 4 circuit = QuantumCircuit(2) circuit.cu1(lam, 0, 1) op =", 
"self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self):", "+ op2, Operator(mat1 + mat2)) self.assertEqual(op1 - op2, Operator(mat1 -", "dim property.\"\"\" mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat,", "2] targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1,", "= np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op", "Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = Operator(op1) self.assertEqual(op1, op2)", "test_instruction_init(self): \"\"\"Test initialization from a circuit.\"\"\" gate = CXGate() op", "self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1, targ) def test_dot(self): \"\"\"Test", "front=True), Operator(targ)) def test_power(self): \"\"\"Test power method.\"\"\" X90 = la.expm(-1j", "import logging import copy import numpy as np from numpy.testing", "self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,))", "4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self): \"\"\"Test Operator copy method\"\"\"", "circuit with measurement.\"\"\" qr = QuantumRegister(2) cr = ClassicalRegister(2) circ", "targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op", "1], [1, 0]]) UY = np.array([[0, -1j], [1j, 0]]) UZ", "unitary circuit with measurement.\"\"\" qr = QuantumRegister(2) cr = ClassicalRegister(2)", "(6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1", "target = np.kron(self.UI, np.diag([1, 0])) + np.kron( self.UH, np.diag([0, 1]))", "= 
Operator(gate).data had = HGate().to_matrix() target = np.kron(had, np.diag([0, 1]))", "* 3 * 4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2, 3,", "6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test power method raises exceptions.\"\"\"", "la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]])", "mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op @ op2([2, 0]), Operator(targ))", "= Operator(mat) self.assertEqual(-op, Operator(-1 * mat)) def test_equiv(self): \"\"\"Test negate", "self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op @ op2([2, 0]), Operator(targ)) #", "assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim,", "= copy.copy(orig) clone._data[0, 0] = 0.0 self.assertTrue(clone == orig) def", "op = Operator(circuit) y90 = (1 / np.sqrt(2)) * np.array([[1,", "mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ)) def test_dot_subsystem(self):", "the LICENSE.txt file in the root directory # of this", "real=True) op = Operator(matr + 1j * mati) uni_t =", "1j * mati) uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j", "0]), Operator(targ)) self.assertEqual(op * op3([2, 1, 0]), Operator(targ)) # op2", "circuit = QuantumCircuit(2) circuit.ch(0, 1) op = Operator(circuit) target =", "seeded with seed=%s\", seed) rng = np.random.RandomState(seed) if cols is", "mat = np.eye(2) with self.subTest(\"Deep copy\"): orig = Operator(mat) cpy", "QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2, qr[2]) y90 = (1", "np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat)) self.assertEqual(val *", "op = Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) reshaped2 =", "= 
np.diag([1, np.exp(1j * np.pi / 2)]) phase = np.exp(-1j", "targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]),", "reshape method.\"\"\" op = Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8])", "exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__,", "@ op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2, 1, 0]", "np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ)) # op1", "5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def", "np.pi / 2)]) phase = np.exp(-1j * np.pi / 4)", "targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1, targ)", "op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T)) def test_compose_except(self): \"\"\"Test", "@ op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a,", "-1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def", "mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ)) # op2 qargs=[2, 0]", "= self.rand_matrix(2, 2) mat_b = self.rand_matrix(2, 2) mat_c = self.rand_matrix(2,", "+ 1j * mati) uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T +", "real=True) op = Operator(matr + 1j * mati) uni_conj =", "output_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 * 4, 4", "\"\"\"Test negate method\"\"\" mat = self.rand_matrix(4, 4) op = Operator(mat)", "2017, 2019. 
# # This code is licensed under the", "np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ)) #", "self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))", "of this code must retain this # copyright notice, and", "UX = np.array([[0, 1], [1, 0]]) UY = np.array([[0, -1j],", "2)) def test_copy(self): \"\"\"Test Operator copy method\"\"\" mat = np.eye(2)", "lam = np.pi / 4 circuit = QuantumCircuit(2) circuit.cu1(lam, 0,", "\"\"\"Test power method.\"\"\" X90 = la.expm(-1j * 0.5 * np.pi", "mat = self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat =", "targ = np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @", "ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test initialization from circuit with measure", "self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5,", "self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self): \"\"\"Test Operator representation string property.\"\"\" mat", "false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator method.\"\"\"", "target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of Controlled-u1 gate lam", "inference of qubit subsystems mat = self.rand_matrix(8, 8) op =", "Operator dim property.\"\"\" mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4))", "test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2", "2.0. 
You may # obtain a copy of this license", "self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\"", "= np.exp(-1j * np.pi / 4) op = Operator(mat) self.assertTrue(op.equiv(phase", "op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(mat,", "Controlled-u1 gate lam = np.pi / 4 circuit = QuantumCircuit(2)", "property.\"\"\" mat = self.rand_matrix(2, 2) op = Operator(mat) assert_allclose(mat, op.data)", "circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2, 2) op", "np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ)) def", "def test_init_array_qubit(self): \"\"\"Test subsystem initialization from N-qubit array.\"\"\" # Test", "copy method\"\"\" mat = np.eye(2) with self.subTest(\"Deep copy\"): orig =", "return rng.rand(rows, cols) return rng.rand(rows, cols) + 1j * rng.rand(rows,", "operator mat = self.rand_matrix(8, 8) mat_a = self.rand_matrix(2, 2) mat_b", "def test_compose_front_subsystem(self): \"\"\"Test subsystem front compose method.\"\"\" # 3-qubit operator", "method raises exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3)) # Non-integer power", "self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError,", "cols=None, real=False): \"\"\"Return a random matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max)", "# Test decomposition of Controlled-u1 gate lam = np.pi /", "3, 4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1,", "np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op *", "2)) op = Operator(mat, input_dims=8, output_dims=8) 
assert_allclose(op.data, mat) self.assertEqual(op.dim, (8,", "= self.rand_matrix(2, 2) op = Operator(mat) op1 = Operator(mat_a) op2", "self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T)) def test_compose_except(self): \"\"\"Test compose", "np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op *", "This code is licensed under the Apache License, Version 2.0.", "UZ = np.diag([1, -1]) UH = np.array([[1, 1], [1, -1]])", "CXGate from qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator import Operator from", "== orig) with self.subTest(\"Shallow copy\"): orig = Operator(mat) clone =", "mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat =", "cols) + 1j * rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return a", "op, Operator(val * mat)) def test_multiply_except(self): \"\"\"Test multiply method raises", "np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1,", "np.diag([1, 1, 1, np.exp(1j * lam)]) global_phase_equivalent = matrix_equal( op.data,", "4 circuit = QuantumCircuit(2) circuit.cu1(lam, 0, 1) op = Operator(circuit)", "Operator\"\"\" # Pauli-matrix unitaries UI = np.eye(2) UX = np.array([[0,", "4)) op2 = op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test conjugate", "mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ)) # op1", "add method raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2)) op2 =", "np.exp(1j * lam)]) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent)", "QuantumCircuit(2) circuit.cu1(lam, 0, 1) op = Operator(circuit) target = np.diag([1,", "np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), 
Operator(targ)) self.assertEqual(op * op2([0, 1]),", "@ op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ =", "Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test initialization from a circuit.\"\"\"", "np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed)", "2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return false self.assertFalse(Operator([[1, 0], [0,", "class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem initialization from N-qubit array.\"\"\" #", "4, real=True) op = Operator(matr + 1j * mati) uni_adj", "mat = np.eye(3) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3,", "part of Qiskit. # # (C) Copyright IBM 2017, 2019.", "self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1", "mat = self.rand_matrix(4, 4) val = np.exp(5j) op = Operator(mat)", "simple_circuit_no_measure(self): \"\"\"Return a unitary circuit and the corresponding unitary array.\"\"\"", "def rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return a random matrix.\"\"\" seed", "Operator(targ)) self.assertEqual(op * op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0,", "def test_dim(self): \"\"\"Test Operator dim property.\"\"\" mat = self.rand_matrix(4, 4)", "if real: return rng.rand(rows, cols) return rng.rand(rows, cols) + 1j", "2019. # # This code is licensed under the Apache", "qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op @", "Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1,", "source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
# # Any modifications or", "(3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2 *", "code must retain this # copyright notice, and modified files", "targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2],", "np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ))", "1, 0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2,", "Operator(mat) op1 = Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a)) op3 =", "np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ))", "QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2, 2) op = Operator(circuit)", "2, qr[2]) y90 = (1 / np.sqrt(2)) * np.array([[1, -1],", "\"\"\"Test subsystem initialization from N-qubit array.\"\"\" # Test automatic inference", "1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2,", "# -*- coding: utf-8 -*- # This code is part", "mati)) def test_transpose(self): \"\"\"Test transpose method.\"\"\" matr = self.rand_matrix(2, 4,", "class.\"\"\" import unittest import logging import copy import numpy as", "op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a,", "self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim,", "with measurement.\"\"\" qr = QuantumRegister(2) cr = ClassicalRegister(2) circ =", "op2 = Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) #", "self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ)) # op3 qargs=[2, 1,", "self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) 
self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2))", "= Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test initialization from a", "transpose method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2,", "raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3, 3))", "0, 1) op = Operator(circuit) target = np.diag([1, 1, 1,", "self.assertEqual(op1 * op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ)", "input_dims=8, output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2,", "5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]),", "3, 4)) def test_init_array_except(self): \"\"\"Test initialization exception from array.\"\"\" mat", "mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ)) # op2 qargs=[0,", "= QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2,", "/ 2, qr[2]) y90 = (1 / np.sqrt(2)) * np.array([[1,", "np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim,", "def test_circuit_init(self): \"\"\"Test initialization from a circuit.\"\"\" # Test tensor", "mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self): \"\"\"Test Operator", "5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1,", "# op3 qargs=[0, 1, 2] targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)),", "= op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr", "= np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 
1, 2]), Operator(targ))", "subsystem front compose method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8,", "1]), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2),", "qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates import", "2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))", "Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) self.assertEqual(op1 + op2, Operator(mat1 +", "code is part of Qiskit. # # (C) Copyright IBM", "Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1, targ) def test_compose_front(self):", "op = Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1", "1, 2], front=True), Operator(targ)) # op3 qargs=[2, 1, 0] targ", "= np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data,", "of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
# # Any", "targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]),", "np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]),", "= np.outer(psi, psi.conj()) rho /= np.trace(rho) return rho @classmethod def", "= self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def test_equal(self): \"\"\"Test __eq__ method\"\"\"", "op = Operator(mat) assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test Operator dim", "2)) self.assertEqual(op.output_dims(), (2, 2, 2)) def test_init_array(self): \"\"\"Test initialization from", "orig.copy() cpy._data[0, 0] = 0.0 self.assertFalse(cpy == orig) with self.subTest(\"Shallow", "array.\"\"\" # Test automatic inference of qubit subsystems mat =", "ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of controlled-H gate circuit =", "\"\"\"Return a random matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState", "method\"\"\" mat = np.eye(2) with self.subTest(\"Deep copy\"): orig = Operator(mat)", "linear operator class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem initialization from N-qubit", "= np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))", "def test_transpose(self): \"\"\"Test transpose method.\"\"\" matr = self.rand_matrix(2, 4, real=True)", "\"\"\"Test add method raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2)) op2", "orig) with self.subTest(\"Shallow copy\"): orig = Operator(mat) clone = copy.copy(orig)", "density matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with", "qargs=[0, 1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3,", "test_adjoint(self): \"\"\"Test adjoint 
method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati", "= Operator(gate).data target = gate.to_matrix() global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)", "* mati) uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j *", "def test_add_except(self): \"\"\"Test add method raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2,", "Non-integer power raises error self.assertRaises(QiskitError, op.power, 0.5) def test_add(self): \"\"\"Test", "* np.pi * np.array([[0, 1], [1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary())", "= Operator(circuit) target = np.kron(self.UI, np.diag([1, 0])) + np.kron( self.UH,", "op1 qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))", "def test_expand(self): \"\"\"Test expand method.\"\"\" mat1 = self.UX mat2 =", "qargs=[2, 1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3,", "# Any modifications or derivative works of this code must", "np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed)", "Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2)))", "op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2,", "+ 1j * mati) uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr -", "self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,))", "qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op", "self.assertRaises(QiskitError, op.power, 0.5) def test_add(self): \"\"\"Test add method.\"\"\" mat1 =", "for Operator\"\"\" # Pauli-matrix unitaries UI = np.eye(2) UX =", "dtype=complex) 
mat21 = np.kron(mat2, mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6,", "4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5) def test_init_operator(self): \"\"\"Test initialization from", "mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op @ op3([2,", "test_dot_subsystem(self): \"\"\"Test subsystem dot method.\"\"\" # 3-qubit operator mat =", "Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2,", "op.power, 0.5) def test_add(self): \"\"\"Test add method.\"\"\" mat1 = self.rand_matrix(4,", "self.assertEqual(op @ op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2, 1,", "self.assertEqual(op @ op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ =", "op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self): \"\"\"Test negate method\"\"\"", "corresponding unitary array.\"\"\" qr = QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0])", "assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 * 5, 2 * 3 *", "exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError,", "op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2,", "np.pi / 4 circuit = QuantumCircuit(2) circuit.cu1(lam, 0, 1) op", "* 4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2, 3, 4)) def", "dot method.\"\"\" op1 = Operator(self.UY) op2 = Operator(self.UX) targ =", "1j * mati) uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j", "mat = self.rand_matrix(2, 2) op = Operator(mat) assert_allclose(mat, op.data) def", "test_equiv(self): \"\"\"Test negate method\"\"\" mat = np.diag([1, np.exp(1j * np.pi", "self.assertEqual(op2 @ op1, targ) def test_dot(self): \"\"\"Test dot method.\"\"\" op1", "return rng.rand(rows, cols) + 
1j * rng.rand(rows, cols) def simple_circuit_no_measure(self):", "1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]),", "op.data) def test_dim(self): \"\"\"Test Operator dim property.\"\"\" mat = self.rand_matrix(4,", "-*- # This code is part of Qiskit. # #", "Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self): \"\"\"Test multiply method.\"\"\"", "2])), Operator(targ)) self.assertEqual(op @ op3([0, 1, 2]), Operator(targ)) # op3", "op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j * mati)) def test_transpose(self): \"\"\"Test", "= CHGate() op = Operator(gate).data had = HGate().to_matrix() target =", "test_equal(self): \"\"\"Test __eq__ method\"\"\" mat = self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat,", "= np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op", "qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))", "def test_power(self): \"\"\"Test power method.\"\"\" X90 = la.expm(-1j * 0.5", "Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test", "controlled-H gate circuit = QuantumCircuit(2) circuit.ch(0, 1) op = Operator(circuit)", "4]) self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3,", "= self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2, 4, real=True) op", "target = np.kron(had, np.diag([0, 1])) + np.kron( np.eye(2), np.diag([1, 0]))", "2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def test_reshape(self): \"\"\"Test", "4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2,", "np.exp(-1j * np.pi / 4) op = Operator(mat) 
self.assertTrue(op.equiv(phase *", "qargs=[2, 0], front=True), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat,", "# Non-unitary should return false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def", "* np.array([[0, 1], [1, 0]]) / 2) op = Operator(X90)", "circ, target def simple_circuit_with_measure(self): \"\"\"Return a unitary circuit with measurement.\"\"\"", "derivative works of this code must retain this # copyright", "np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def test_power(self): \"\"\"Test power method.\"\"\"", "mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError, Operator,", "* op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem front compose method.\"\"\"", "self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test compose method.\"\"\" op1 =", "\"\"\"Test expand method.\"\"\" mat1 = self.UX mat2 = np.eye(3, dtype=complex)", "2)) self.assertEqual(op.output_dims(), (2, 2, 2)) op = Operator(mat, input_dims=8, output_dims=8)", "Test automatic inference of qubit subsystems mat = self.rand_matrix(8, 8)", "gate circuit = QuantumCircuit(2) circuit.ch(0, 1) op = Operator(circuit) target", "self.assertEqual(op.output_dims(), (2, 2, 2)) def test_init_array(self): \"\"\"Test initialization from array.\"\"\"", "Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2) def", "gate = CHGate() op = Operator(gate).data had = HGate().to_matrix() target", "mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12", "(5,)) def test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\" op = Operator(self.rand_matrix(2", "[1, -1]]) / np.sqrt(2) @classmethod def rand_rho(cls, n): \"\"\"Return random", "op = Operator(gate).data 
had = HGate().to_matrix() target = np.kron(had, np.diag([0,", "self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self): \"\"\"Test Operator", "0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))", "with self.subTest(\"Deep copy\"): orig = Operator(mat) cpy = orig.copy() cpy._data[0,", "qargs=[2, 0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2,", "HGate, CHGate, CXGate from qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator import", "self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))", "file in the root directory # of this source tree", "3-qubit operator mat = self.rand_matrix(8, 8) mat_a = self.rand_matrix(2, 2)", "2)]) phase = np.exp(-1j * np.pi / 4) op =", "return rho @classmethod def rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return a", "# op3 qargs=[2, 1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b,", "3 * 4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2, 3, 4))", "mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))", "opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY))", "Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem dot", "Operator(-1 * mat)) def test_equiv(self): \"\"\"Test negate method\"\"\" mat =", "add method.\"\"\" mat1 = self.rand_matrix(4, 4) mat2 = self.rand_matrix(4, 4)", "gate = CXGate() op = Operator(gate).data target = gate.to_matrix() global_phase_equivalent", "Operator(mat1 + mat2)) self.assertEqual(op1 - op2, Operator(mat1 - mat2)) def", 
"0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1 = Operator(self.rand_matrix(4, 4))", "0]]) UZ = np.diag([1, -1]) UH = np.array([[1, 1], [1,", "3 * 4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3,", "method raises exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's')", "= Operator(mat) clone = copy.copy(orig) clone._data[0, 0] = 0.0 self.assertTrue(clone", "qr = QuantumRegister(2) cr = ClassicalRegister(2) circ = QuantumCircuit(qr, cr)", "Operator(mat1 + mat2)) self.assertEqual(op1 + op2, Operator(mat1 + mat2)) self.assertEqual(op1", "opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX = np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX))", "def test_compose_front(self): \"\"\"Test front compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True)", "IBM 2017, 2019. # # This code is licensed under", "0, 1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]),", "Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test compose method.\"\"\" op1", "is_unitary method.\"\"\" # X-90 rotation X90 = la.expm(-1j * 0.5", "4, real=True) op = Operator(matr + 1j * mati) uni_t", "mat)) self.assertEqual(val * op, Operator(val * mat)) def test_multiply_except(self): \"\"\"Test", "test_reshape(self): \"\"\"Test Operator reshape method.\"\"\" op = Operator(self.rand_matrix(8, 8)) reshaped1", "mat = self.rand_matrix(4, 4) op = Operator(mat) self.assertEqual(-op, Operator(-1 *", "qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ)) # op1 qargs=[1] targ", "0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1,", "4) mat2 = self.rand_matrix(4, 4) op1 = Operator(mat1) op2 =", 
"self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(), (2,", "rho = np.outer(psi, psi.conj()) rho /= np.trace(rho) return rho @classmethod", "Any modifications or derivative works of this code must retain", "Operator(self.rand_matrix(4, 4)) op2 = Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test", "and modified files need to carry a notice indicating #", "self.assertEqual(op * op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ", "test_expand(self): \"\"\"Test expand method.\"\"\" mat1 = self.UX mat2 = np.eye(3,", "mat21 = np.kron(mat2, mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6))", "/ 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return false self.assertFalse(Operator([[1, 0],", "Operator(targ)) def test_power(self): \"\"\"Test power method.\"\"\" X90 = la.expm(-1j *", "from qiskit.test import QiskitTestCase from qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates", "mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ)) # op1", "test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 =", "seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with seed=%s\", seed)", "def test_instruction_init(self): \"\"\"Test initialization from a circuit.\"\"\" gate = CXGate()", "qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem", "op1 = Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c,", "input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]),", "compose method.\"\"\" op1 = 
Operator(self.UX) op2 = Operator(self.UY) targ =", "np.array([[0, 1], [1, 0]]) UY = np.array([[0, -1j], [1j, 0]])", "class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\" # Pauli-matrix unitaries UI", "targ) self.assertEqual(op1 * op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1),", "self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4,", "6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2))", "= np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]),", "uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j * mati)) def", "self.assertTrue(global_phase_equivalent) # Test decomposition of controlled-H gate circuit = QuantumCircuit(2)", "op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) self.assertEqual(op1 + op2,", "Operator(targ)) self.assertEqual(op * op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2,", "ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import HGate, CHGate, CXGate from qiskit.test", "unittest import logging import copy import numpy as np from", "(2, 3, 4)) def test_init_array_except(self): \"\"\"Test initialization exception from array.\"\"\"", "op1, targ) def test_compose_front(self): \"\"\"Test front compose method.\"\"\" opYX =", "* op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2),", "\"\"\"Test subsystem dot method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8,", "array.\"\"\" qr = QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi", "= op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j * mati)) def test_transpose(self):", "test_transpose(self): \"\"\"Test transpose method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati", "0]), (4, 2)) def 
test_reshape(self): \"\"\"Test Operator reshape method.\"\"\" op", "= Operator(self.rand_matrix(4, 4)) op2 = Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self):", "test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1 = self.UX mat2 = np.eye(3,", "1], [1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return", "1j * mati.T)) def test_compose_except(self): \"\"\"Test compose different dimension exception\"\"\"", "Operator matrix linear operator class.\"\"\" import unittest import logging import", "(4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4)) def", "= np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]),", "= matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate() op =", "self.assertEqual(op * op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2, 1,", "this license in the LICENSE.txt file in the root directory", "logging import copy import numpy as np from numpy.testing import", "import numpy as np from numpy.testing import assert_allclose import scipy.linalg", "\"\"\"Test tensor method.\"\"\" mat1 = self.UX mat2 = np.eye(3, dtype=complex)", "def test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\" op = Operator(self.rand_matrix(2 *", "self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization from a circuit.\"\"\" gate =", "\"\"\"Test conjugate method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati =", "rho /= np.trace(rho) return rho @classmethod def rand_matrix(cls, rows, cols=None,", "self.assertEqual(op.output_dims(), (2, 2, 2)) op = Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data,", "test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\" # 3-qubit operator mat =", "from qiskit.quantum_info.operators.predicates import matrix_equal logger = 
logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test", "np.kron(self.UX, self.UH))) return circ, target def simple_circuit_with_measure(self): \"\"\"Return a unitary", "np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op * op2([2, 0]),", "0], front=True), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(mat,", "1]]) target = np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal( op.data,", "Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand method.\"\"\" mat1 = self.UX mat2", "self.assertEqual(op * op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0, 1]", "Operator linear operator class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem initialization from", "= self.rand_matrix(2, 2) op = Operator(mat) assert_allclose(mat, op.data) def test_dim(self):", "4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(),", "np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))", "op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,))", "= Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 *", "= QuantumCircuit(2) circuit.cu1(lam, 0, 1) op = Operator(circuit) target =", "np.diag([0, 1])) + np.kron( np.eye(2), np.diag([1, 0])) global_phase_equivalent = matrix_equal(op,", "initialization from circuit with measure raises exception.\"\"\" circuit = self.simple_circuit_with_measure()", "op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 *", "Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ)) # op1 qargs=[2] targ =", "license in the LICENSE.txt file in the root directory #", "op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j * 
mati.T)) def test_adjoint(self): \"\"\"Test", "op = Operator(circuit) target = np.diag([1, 1, 1, np.exp(1j *", "self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ)) # op2 qargs=[0, 1]", "Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ)) # op1 qargs=[1] targ =", "np.sqrt(2) @classmethod def rand_rho(cls, n): \"\"\"Return random density matrix\"\"\" seed", "op) def test_negate(self): \"\"\"Test negate method\"\"\" mat = self.rand_matrix(4, 4)", "np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ)) def", "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard import HGate,", "method.\"\"\" # X-90 rotation X90 = la.expm(-1j * 0.5 *", "op2 = Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test initialization from", "5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2, 3, 4))", "modified files need to carry a notice indicating # that", "been altered from the originals. 
# pylint: disable=invalid-name \"\"\"Tests for", "= QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return circ class", "* np.array([[1, -1], [1, 1]]) target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))", "circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2, qr[2]) y90", "= Operator(circuit) target = np.diag([1, 1, 1, np.exp(1j * lam)])", "Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1,", "np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ)) #", "= Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def", "circuit.\"\"\" # Test tensor product of 1-qubit gates circuit =", "self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ)) # op1 qargs=[1]", "2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5)", "\"\"\"Test power method raises exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3)) #", "as np from numpy.testing import assert_allclose import scipy.linalg as la", "method.\"\"\" X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0,", "* mati) uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j *", "# op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]),", "mati) uni_conj = op.conjugate() self.assertEqual(uni_conj, Operator(matr - 1j * mati))", "clone = copy.copy(orig) clone._data[0, 0] = 0.0 self.assertTrue(clone == orig)", "circuit.ry(np.pi / 2, 2) op = Operator(circuit) y90 = (1", "op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2, 1, 0] targ", "self.assertEqual(op @ op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(np.kron(mat_a,", "= Operator(self.UY).compose(Operator(self.UX), front=True) matYX = 
np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX)) opXY", "(4, 5)) self.assertEqual(op.output_dims(), (2, 3, 4)) def test_init_array_except(self): \"\"\"Test initialization", "2, 2) op = Operator(circuit) y90 = (1 / np.sqrt(2))", "QuantumRegister(2) cr = ClassicalRegister(2) circ = QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1])", "op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a),", "targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True),", "qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op", "rng = np.random.RandomState(seed) psi = rng.rand(n) + 1j * rng.rand(n)", "negate method\"\"\" mat = np.diag([1, np.exp(1j * np.pi / 2)])", "= Operator(self.rand_matrix(2 * 3 * 4, 4 * 5), input_dims=[4,", "# op2 qargs=[0, 1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)", "Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1, targ) def test_dot(self):", "8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2,", "* 3 * 4, 4 * 5), input_dims=[4, 5], output_dims=[2,", "= np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) # op1", "\"\"\"Tests for Operator linear operator class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem", "= Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2, targ) targ", "qargs=[0, 1]), Operator(targ)) self.assertEqual(op * op2([0, 1]), Operator(targ)) # op2", "originals. 
# pylint: disable=invalid-name \"\"\"Tests for Operator matrix linear operator", "-1], [1, 1]]) target = np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent =", "0], front=True), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4),", "of controlled-H gate circuit = QuantumCircuit(2) circuit.ch(0, 1) op =", "= logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\" # Pauli-matrix", "= Operator(matr + 1j * mati) uni_t = op.transpose() self.assertEqual(uni_t,", "a random matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded", "= np.kron(had, np.diag([0, 1])) + np.kron( np.eye(2), np.diag([1, 0])) global_phase_equivalent", "op.reshape(input_dims=[8], output_dims=[8]) reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2,", "self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op @ op3([2, 1, 0]),", "self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat = self.rand_matrix(4, 4)", "LICENSE.txt file in the root directory # of this source", "4) op1 = Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 +", "1j * rng.rand(n) rho = np.outer(psi, psi.conj()) rho /= np.trace(rho)", "def test_dot(self): \"\"\"Test dot method.\"\"\" op1 = Operator(self.UY) op2 =", "np from numpy.testing import assert_allclose import scipy.linalg as la from", "qr = QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi /", "rotation X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0,", "\"\"\"Test initialization from array.\"\"\" mat = np.eye(3) op = Operator(mat)", "Copyright IBM 2017, 2019. 
# # This code is licensed", "self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op * op3([2, 1, 0]),", "test_init_array(self): \"\"\"Test initialization from array.\"\"\" mat = np.eye(3) op =", "self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self): \"\"\"Test Operator representation string", "rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return a unitary circuit and the", "negate method\"\"\" mat = self.rand_matrix(4, 4) op = Operator(mat) self.assertEqual(-op,", "unitary circuit and the corresponding unitary array.\"\"\" qr = QuantumRegister(3)", "global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate() op", "Version 2.0. You may # obtain a copy of this", "(2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2,", "= np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True)", "targ = np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @", "self.UX mat2 = np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1) op21", "mat = self.rand_matrix(2 * 3 * 4, 4 * 5)", "Operator(mat) cpy = orig.copy() cpy._data[0, 0] = 0.0 self.assertFalse(cpy ==", "targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))", "self.assertFalse(cpy == orig) with self.subTest(\"Shallow copy\"): orig = Operator(mat) clone", "op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3 qargs=[0, 1, 2]", "np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ)) # op2", "qargs=[2, 0]), Operator(targ)) self.assertEqual(op * op2([2, 0]), Operator(targ)) 
# op1", "op2, Operator(mat1 - mat2)) def test_add_except(self): \"\"\"Test add method raises", "self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])", "self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test initialization from a circuit.\"\"\" #", "Operator, mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError,", "2]), Operator(targ)) # op3 qargs=[2, 1, 0] targ = np.dot(np.kron(mat_a,", "op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))),", "self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def test_reshape(self): \"\"\"Test Operator reshape method.\"\"\"", "test_compose(self): \"\"\"Test compose method.\"\"\" op1 = Operator(self.UX) op2 = Operator(self.UY)", "self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test power method", "* op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2", "1, 0]), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(np.kron(np.eye(2),", "tree or at http://www.apache.org/licenses/LICENSE-2.0. 
# # Any modifications or derivative", "4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4)) def test_input_dims(self):", "# op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]),", "property.\"\"\" mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4],", "Non-unitary should return false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def test_to_operator(self):", "4, real=True) op = Operator(matr + 1j * mati) uni_conj", "0.0 self.assertTrue(clone == orig) def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" #", "self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\" # 3-qubit", "val = np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat))", "np.eye(2), np.diag([1, 0])) global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def", "op1._add, op2) def test_multiply(self): \"\"\"Test multiply method.\"\"\" mat = self.rand_matrix(4,", "rand_rho(cls, n): \"\"\"Return random density matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max)", "License, Version 2.0. 
You may # obtain a copy of", "= Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test", "automatic inference of qubit subsystems mat = self.rand_matrix(8, 8) op", "Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test power", "Operator(val * mat)) def test_multiply_except(self): \"\"\"Test multiply method raises exceptions.\"\"\"", "= orig.copy() cpy._data[0, 0] = 0.0 self.assertFalse(cpy == orig) with", "mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op @ op2([2, 0]),", "method raises exceptions.\"\"\" op1 = Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3,", "self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def test_equal(self): \"\"\"Test __eq__ method\"\"\" mat", "= Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 *", "Operator(circuit) target = np.diag([1, 1, 1, np.exp(1j * lam)]) global_phase_equivalent", "\"\"\"Test Operator output_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 *", "op1 qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]),", "(2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self): \"\"\"Test Operator copy", "from the originals. 
# pylint: disable=invalid-name \"\"\"Tests for Operator matrix", "self.rand_matrix(2, 2) mat_b = self.rand_matrix(2, 2) mat_c = self.rand_matrix(2, 2)", "* np.array([[0, 1], [1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary", "logger.debug(\"rand_matrix RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed) if", "seed) rng = np.random.RandomState(seed) if cols is None: cols =", "= self.rand_matrix(4, 4) op1 = Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2),", "from qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates import matrix_equal logger =", "* np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand method.\"\"\" mat1", "self.rand_matrix(2, 2) mat_c = self.rand_matrix(2, 2) op = Operator(mat) op1", "- 1j * mati)) def test_transpose(self): \"\"\"Test transpose method.\"\"\" matr", "Operator(matr + 1j * mati) uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T", "Operator(mat12).data) def test_power_except(self): \"\"\"Test power method raises exceptions.\"\"\" op =", "operator class.\"\"\" import unittest import logging import copy import numpy", "real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat))", "rows if real: return rng.rand(rows, cols) return rng.rand(rows, cols) +", "test_negate(self): \"\"\"Test negate method\"\"\" mat = self.rand_matrix(4, 4) op =", "[1, 0]]) / 2) op = Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j],", "0] = 0.0 self.assertTrue(clone == orig) def test_is_unitary(self): \"\"\"Test is_unitary", "(2, 2, 2)) def test_init_array(self): \"\"\"Test initialization from array.\"\"\" mat", "self.subTest(\"Shallow copy\"): orig = Operator(mat) clone = copy.copy(orig) clone._data[0, 0]", "matrix_equal(op, target, ignore_phase=True) 
self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test initialization from circuit", "= np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op", "mat2)) def test_add_except(self): \"\"\"Test add method raises exceptions.\"\"\" op1 =", "3 * 4, 4 * 5) op = Operator(mat, input_dims=[4,", "Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2)", "def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" # X-90 rotation X90 =", "circuit.ch(0, 1) op = Operator(circuit) target = np.kron(self.UI, np.diag([1, 0]))", "tensor method.\"\"\" mat1 = self.UX mat2 = np.eye(3, dtype=complex) mat21", "self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2,", "np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op * op2([2,", "= np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))", "coding: utf-8 -*- # This code is part of Qiskit.", "targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2, targ)", "= np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data,", "op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4),", "test_init_array_qubit(self): \"\"\"Test subsystem initialization from N-qubit array.\"\"\" # Test automatic", "1, 0], front=True), Operator(targ)) # op2 qargs=[0, 1] targ =", "# op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))", "mat12 = np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 
6))", "global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test initialization", "copy.copy(orig) clone._data[0, 0] = 0.0 self.assertTrue(clone == orig) def test_is_unitary(self):", "[-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self):", "Qiskit. # # (C) Copyright IBM 2017, 2019. # #", "-1], [1, 1]]) target = Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ,", "np.eye(2) UX = np.array([[0, 1], [1, 0]]) UY = np.array([[0,", "mat1 = self.rand_matrix(4, 4) mat2 = self.rand_matrix(4, 4) op1 =", "+ mat2)) self.assertEqual(op1 + op2, Operator(mat1 + mat2)) self.assertEqual(op1 -", "@classmethod def rand_rho(cls, n): \"\"\"Return random density matrix\"\"\" seed =", "Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self):", "qiskit.quantum_info.operators.predicates import matrix_equal logger = logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils", "op) self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self): \"\"\"Test negate method\"\"\" mat", "global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test", "# op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1,", "power method raises exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3)) # Non-integer", "under the Apache License, Version 2.0. 
You may # obtain", "rng.rand(rows, cols) + 1j * rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return", "targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))", "linear operator class.\"\"\" import unittest import logging import copy import", "3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,))", "1, np.exp(1j * lam)]) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True)", "ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization from a circuit.\"\"\" gate", "np.diag([1, 0])) + np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent = matrix_equal(", "def test_data(self): \"\"\"Test Operator representation string property.\"\"\" mat = self.rand_matrix(2,", "qargs=[1], front=True), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a,", "/ np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target = Operator(np.kron(y90,", "8) mat_a = self.rand_matrix(2, 2) mat_b = self.rand_matrix(2, 2) mat_c", "random density matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded", "np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True)", "5], output_dims=[2, 3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 * 5,", "a circuit.\"\"\" # Test tensor product of 1-qubit gates circuit", "= Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1, targ) def", "qargs=[2, 1, 0], front=True), Operator(targ)) # op2 qargs=[0, 1] targ", "to carry a notice indicating # that they have been", "(1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target =", "self.assertEqual(op * op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem front compose", "operator 
class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem initialization from N-qubit array.\"\"\"", "qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0,", "np.kron(had, np.diag([0, 1])) + np.kron( np.eye(2), np.diag([1, 0])) global_phase_equivalent =", "Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6,", "0]]) UY = np.array([[0, -1j], [1j, 0]]) UZ = np.diag([1,", "circuit.cu1(lam, 0, 1) op = Operator(circuit) target = np.diag([1, 1,", "test_add(self): \"\"\"Test add method.\"\"\" mat1 = self.rand_matrix(4, 4) mat2 =", "= Operator(self.UY) op2 = Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2),", "4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]),", "op2 qargs=[0, 1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2,", "Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ)) # op1 qargs=[1] targ =", "= Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1,", "np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target = np.kron(y90, np.kron(self.UX,", "test_dot(self): \"\"\"Test dot method.\"\"\" op1 = Operator(self.UY) op2 = Operator(self.UX)", "Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op @ op3([0, 1, 2]),", "Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))", "the corresponding unitary array.\"\"\" qr = QuantumRegister(3) circ = QuantumCircuit(qr)", "to_operator method.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator() self.assertEqual(op1,", "qargs=[0, 1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) 
self.assertEqual(op.compose(op3,", "assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat", "= np.kron(self.UI, np.diag([1, 0])) + np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent", "(4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]),", "= self.rand_matrix(8, 8) mat_a = self.rand_matrix(2, 2) mat_b = self.rand_matrix(2,", "np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0,", "0])) + np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent = matrix_equal( op.data,", "copy of this license in the LICENSE.txt file in the", "= op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T)) def test_adjoint(self):", "\"\"\"Return a unitary circuit with measurement.\"\"\" qr = QuantumRegister(2) cr", "+ 1j * rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return a unitary", "4) self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2,", "in the LICENSE.txt file in the root directory # of", "1], front=True), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(mat,", "self.rand_matrix(4, 4) val = np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val", "\"\"\"Test multiply method raises exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError,", "cols) def simple_circuit_no_measure(self): \"\"\"Return a unitary circuit and the corresponding", "self.assertEqual(opYX, Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX, self.UY)", "copy import numpy as np from numpy.testing import assert_allclose import", "mati = self.rand_matrix(2, 4, real=True) op = Operator(matr + 1j", "circuit.\"\"\" gate = CXGate() op = 
Operator(gate).data target = gate.to_matrix()", "self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2 * 3 *", "Operator(matr.T + 1j * mati.T)) def test_adjoint(self): \"\"\"Test adjoint method.\"\"\"", "import QiskitTestCase from qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates import matrix_equal", "self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2", "np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op * op2([0,", "op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1],", "(4 * 5, 2 * 3 * 4)) self.assertEqual(op.input_dims(), (4,", "Operator(targ)) self.assertEqual(op @ op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0,", "the originals. # pylint: disable=invalid-name \"\"\"Tests for Operator matrix linear", "works of this code must retain this # copyright notice,", "mat = self.rand_matrix(8, 8) mat_a = self.rand_matrix(2, 2) mat_b =", "2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]),", "op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 =", "at http://www.apache.org/licenses/LICENSE-2.0. 
# # Any modifications or derivative works of", "must retain this # copyright notice, and modified files need", "subsystem dot method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8)", "2 * 3 * 4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2,", "N-qubit array.\"\"\" # Test automatic inference of qubit subsystems mat", "self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op @ op3([0, 1, 2]), Operator(targ))", "= np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True),", "expand method.\"\"\" mat1 = self.UX mat2 = np.eye(3, dtype=complex) mat21", "mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def", "assert_allclose import scipy.linalg as la from qiskit import QiskitError from", "= Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3 qargs=[0, 1, 2] targ", "mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def", "np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op * op3([2,", "Operator(mat) assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test Operator dim property.\"\"\" mat", "1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,))", "qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0,", "= QuantumCircuit(2) circuit.ch(0, 1) op = Operator(circuit) target = np.kron(self.UI,", "mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ)) #", "2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) op = Operator(mat, input_dims=8,", "= np.dot(self.UY, self.UX) self.assertEqual(opYX, 
Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY", "initialization from array.\"\"\" mat = np.eye(3) op = Operator(mat) assert_allclose(op.data,", "= Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self): \"\"\"Test multiply", "Operator(targ)) self.assertEqual(op @ op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ", "0]]) / 2) op = Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j,", "mat)) def test_equiv(self): \"\"\"Test negate method\"\"\" mat = np.diag([1, np.exp(1j", "0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test", "4)) def test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\" op = Operator(self.rand_matrix(2", "Operator representation string property.\"\"\" mat = self.rand_matrix(2, 2) op =", "* 5) op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])", "cpy = orig.copy() cpy._data[0, 0] = 0.0 self.assertFalse(cpy == orig)", "np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def test_power(self): \"\"\"Test", "self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat,", "3)) # Non-integer power raises error self.assertRaises(QiskitError, op.power, 0.5) def", "Operator(targ)) self.assertEqual(op @ op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2,", "self.subTest(\"Deep copy\"): orig = Operator(mat) cpy = orig.copy() cpy._data[0, 0]", "a unitary circuit and the corresponding unitary array.\"\"\" qr =", "1j * mati.T)) def test_adjoint(self): \"\"\"Test adjoint method.\"\"\" matr =", "np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op @", "# op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2],", "@ op2([2, 0]), 
Operator(targ)) # op1 qargs=[0] targ = np.dot(np.kron(np.eye(4),", "targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1,", "@ op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0, 1] targ", "output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))", "3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 * 5, 2 *", "circ = QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return circ", "logger.debug(\"rand_rho RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed) psi", "np.random.RandomState(seed) psi = rng.rand(n) + 1j * rng.rand(n) rho =", "* mati.T)) def test_adjoint(self): \"\"\"Test adjoint method.\"\"\" matr = self.rand_matrix(2,", "= Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ, target def simple_circuit_with_measure(self): \"\"\"Return", "conjugate method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2,", "\"\"\"Return random density matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState", "= Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add, op2)", "self.assertEqual(val * op, Operator(val * mat)) def test_multiply_except(self): \"\"\"Test multiply", "self.assertEqual(op1 @ op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ)", "np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target = Operator(np.kron(y90, np.kron(self.UX,", "return circ class TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear operator class.\"\"\"", "op = Operator(mat) op1 = Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a))", "self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op * op2([2, 0]), Operator(targ)) #", "Operator(mat)) mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), 
Operator(mat)) def test_data(self): \"\"\"Test", "op1, targ) def test_dot(self): \"\"\"Test dot method.\"\"\" op1 = Operator(self.UY)", "matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem", "5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0,", "= Operator(mat) cpy = orig.copy() cpy._data[0, 0] = 0.0 self.assertFalse(cpy", "\"\"\"Test transpose method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati =", "* op1, targ) def test_compose_front(self): \"\"\"Test front compose method.\"\"\" opYX", "1, 2]), Operator(targ)) self.assertEqual(op * op3([0, 1, 2]), Operator(targ)) #", "(2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) def test_init_array(self): \"\"\"Test", "op2) def test_multiply(self): \"\"\"Test multiply method.\"\"\" mat = self.rand_matrix(4, 4)", "# Non-integer power raises error self.assertRaises(QiskitError, op.power, 0.5) def test_add(self):", "Operator(targ)) self.assertEqual(op * op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ", "input_dims=[4, 2]) self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat,", "error self.assertRaises(QiskitError, op.power, 0.5) def test_add(self): \"\"\"Test add method.\"\"\" mat1", "0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]),", "Operator(targ)) # op1 qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1,", "cols) return rng.rand(rows, cols) + 1j * rng.rand(rows, cols) def", "mati) uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))", "/ np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target = np.kron(y90,", "import QiskitError from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit.extensions.standard", "* 4, 4 * 5) op = Operator(mat, input_dims=[4, 
5],", "orig = Operator(mat) clone = copy.copy(orig) clone._data[0, 0] = 0.0", "matrix linear operator class.\"\"\" import unittest import logging import copy", "raises exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def test_equal(self):", "* mat)) def test_equiv(self): \"\"\"Test negate method\"\"\" mat = np.diag([1,", "target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test initialization from circuit with", "Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4", "[1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return false", "circuit with measure raises exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator,", "Operator(circuit) target = np.kron(self.UI, np.diag([1, 0])) + np.kron( self.UH, np.diag([0,", "np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op @", "initialization from Operator.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = Operator(op1)", "* rng.rand(n) rho = np.outer(psi, psi.conj()) rho /= np.trace(rho) return", "test_multiply_except(self): \"\"\"Test multiply method raises exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2))", "rng.rand(n) + 1j * rng.rand(n) rho = np.outer(psi, psi.conj()) rho", "qargs=[0], front=True), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2),", "(6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 =", "circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return circ class TestOperator(OperatorTestCase): \"\"\"Tests for", "4)) op2 = Operator(op1) self.assertEqual(op1, op2) def test_circuit_init(self): \"\"\"Test initialization", "= np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1) op21 = 
Operator(mat2).tensor(Operator(mat1))", "front=True) matYX = np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY),", "test_init_array_except(self): \"\"\"Test initialization exception from array.\"\"\" mat = self.rand_matrix(4, 4)", "test_data(self): \"\"\"Test Operator representation string property.\"\"\" mat = self.rand_matrix(2, 2)", "self.assertEqual(op2 * op1, targ) def test_compose_front(self): \"\"\"Test front compose method.\"\"\"", "# 3-qubit operator mat = self.rand_matrix(8, 8) mat_a = self.rand_matrix(2,", "# op1 qargs=[1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1,", "of Qiskit. # # (C) Copyright IBM 2017, 2019. #", "self.assertRaises(QiskitError, Operator, circuit) def test_equal(self): \"\"\"Test __eq__ method\"\"\" mat =", "power method.\"\"\" X90 = la.expm(-1j * 0.5 * np.pi *", "files need to carry a notice indicating # that they", "mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op @ op2([0, 1]), Operator(targ))", "(2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def test_reshape(self): \"\"\"Test Operator", "targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) #", "for Operator matrix linear operator class.\"\"\" import unittest import logging", "subsystem initialization from N-qubit array.\"\"\" # Test automatic inference of", "op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of controlled-H gate", "self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def test_reshape(self):", "self.rand_matrix(2, 4, real=True) op = Operator(matr + 1j * mati)", "= self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat = self.rand_matrix(4,", "\"\"\"Test to_operator 
method.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator()", "np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ)) #", "a copy of this license in the LICENSE.txt file in", "# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # #", "[1, 0]]) UY = np.array([[0, -1j], [1j, 0]]) UZ =", "targ = np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op *", "and the corresponding unitary array.\"\"\" qr = QuantumRegister(3) circ =", "# Test automatic inference of qubit subsystems mat = self.rand_matrix(8,", "/ 2) op = Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))", "self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ)) # op1 qargs=[0] targ =", "def test_negate(self): \"\"\"Test negate method\"\"\" mat = self.rand_matrix(4, 4) op", "output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(), (2, 2, 2))", "Operator(matr - 1j * mati)) def test_transpose(self): \"\"\"Test transpose method.\"\"\"", "initialization from a circuit.\"\"\" # Test tensor product of 1-qubit", "4)) def test_init_array_except(self): \"\"\"Test initialization exception from array.\"\"\" mat =", "unitary array.\"\"\" qr = QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1])", "# # (C) Copyright IBM 2017, 2019. 
# # This", "1) op = Operator(circuit) target = np.kron(self.UI, np.diag([1, 0])) +", "= Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self):", "mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ)) # op1", "self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2, targ) targ = Operator(np.dot(self.UX,", "# # Any modifications or derivative works of this code", "method.\"\"\" mat1 = self.rand_matrix(4, 4) mat2 = self.rand_matrix(4, 4) op1", "= np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))", "self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self): \"\"\"Test multiply method.\"\"\" mat =", "op = Operator(circuit) target = np.kron(self.UI, np.diag([1, 0])) + np.kron(", "= Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2,", "/ np.sqrt(2) @classmethod def rand_rho(cls, n): \"\"\"Return random density matrix\"\"\"", "notice indicating # that they have been altered from the", "= 0.0 self.assertFalse(cpy == orig) with self.subTest(\"Shallow copy\"): orig =", "Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat)) self.assertEqual(val * op, Operator(val *", "self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) op =", "'s') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self): \"\"\"Test", "np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data)", "targ) self.assertEqual(op1 @ op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1),", "= np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with 
seed=%s\", seed) rng", "Operator(mat1 - mat2)) def test_add_except(self): \"\"\"Test add method raises exceptions.\"\"\"", "targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]),", "multiply method.\"\"\" mat = self.rand_matrix(4, 4) val = np.exp(5j) op", "= Operator(matr + 1j * mati) uni_adj = op.adjoint() self.assertEqual(uni_adj,", "self.rand_matrix(8, 8) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8))", "target = np.diag([1, 1, 1, np.exp(1j * lam)]) global_phase_equivalent =", "Operator(self.UX) op2 = Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ)", "0]), Operator(targ)) self.assertEqual(op * op2([2, 0]), Operator(targ)) # op1 qargs=[0]", "3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1,", "targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))", "qiskit import QiskitError from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from", "mat_a = self.rand_matrix(2, 2) mat_b = self.rand_matrix(2, 2) mat_c =", "= self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2]) self.assertRaises(QiskitError, Operator,", "disable=invalid-name \"\"\"Tests for Operator matrix linear operator class.\"\"\" import unittest", "* mati)) def test_transpose(self): \"\"\"Test transpose method.\"\"\" matr = self.rand_matrix(2,", "0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return false self.assertFalse(Operator([[1,", "HGate().to_matrix() target = np.kron(had, np.diag([0, 1])) + np.kron( np.eye(2), np.diag([1,", "self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 * mat)) if __name__ == '__main__':", "* np.pi / 2)]) phase = np.exp(-1j * np.pi /", "scipy.linalg as la from qiskit import QiskitError from qiskit 
import", "/ 2, 2) op = Operator(circuit) y90 = (1 /", "= np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat)) self.assertEqual(val", "def test_copy(self): \"\"\"Test Operator copy method\"\"\" mat = np.eye(2) with", "= QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2, 2) op =", "self.rand_matrix(2, 2) op = Operator(mat) assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test", "op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @", "matYX = np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True)", "0]), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a))", "Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2,", "compose different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)", "np.eye(4))) self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ)) def test_compose_front_subsystem(self):", "-*- coding: utf-8 -*- # This code is part of", "\"\"\"Test Operator copy method\"\"\" mat = np.eye(2) with self.subTest(\"Deep copy\"):", "+ 1j * rng.rand(n) rho = np.outer(psi, psi.conj()) rho /=", "self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self): \"\"\"Test negate", "method\"\"\" mat = self.rand_matrix(4, 4) op = Operator(mat) self.assertEqual(-op, Operator(-1", "= Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim,", "\"\"\"Test initialization from a circuit.\"\"\" # Test tensor product of", "1, 0] targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, 
qargs=[2,", "mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op * op3([0, 1,", "np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent)", "real: return rng.rand(rows, cols) return rng.rand(rows, cols) + 1j *", "= Operator(mat) assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test Operator dim property.\"\"\"", "-1]) UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2) @classmethod", "Operator(self.UY).compose(Operator(self.UX), front=True) matYX = np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX)) opXY =", "target = gate.to_matrix() global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate", "copy\"): orig = Operator(mat) cpy = orig.copy() cpy._data[0, 0] =", "cols = rows if real: return rng.rand(rows, cols) return rng.rand(rows,", "self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test", "mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2,", "Operator(gate).data had = HGate().to_matrix() target = np.kron(had, np.diag([0, 1])) +", "Operator(targ)) self.assertEqual(op * op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0]", "2], front=True), Operator(targ)) # op3 qargs=[2, 1, 0] targ =", "1, 0]), Operator(targ)) self.assertEqual(op * op3([2, 1, 0]), Operator(targ)) #", "= rng.rand(n) + 1j * rng.rand(n) rho = np.outer(psi, psi.conj())", "- op2, Operator(mat1 - mat2)) def test_add_except(self): \"\"\"Test add method", "assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1 = self.UX", "qargs=[2], front=True), Operator(targ)) def test_power(self): \"\"\"Test power method.\"\"\" X90 =", "op12 = Operator(mat2).expand(Operator(mat1)) 
self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self):", "root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.", "matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with seed=%s\",", "Operator(targ)) # op3 qargs=[2, 1, 0] targ = np.dot(np.kron(mat_a, np.kron(mat_b,", "2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4,", "raises exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3)) # Non-integer power raises", "op3 qargs=[0, 1, 2] targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)", "0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]),", "RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed) if cols", "cols is None: cols = rows if real: return rng.rand(rows,", "op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__,", "* 5, 2 * 3 * 4)) self.assertEqual(op.input_dims(), (4, 5))", "2) op = Operator(mat) assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test Operator", "op3 qargs=[0, 1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))", "2]).dim, (4, 4)) def test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\" op", "output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]),", "qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op * op3([0, 1, 2]), Operator(targ))", "self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op * op3([0, 1, 2]),", "4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def test_reshape(self): \"\"\"Test Operator reshape", "6)) 
assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1 =", "compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX = np.dot(self.UY, self.UX)", "input_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 * 4, 4", "matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization from", "simple_circuit_with_measure(self): \"\"\"Return a unitary circuit with measurement.\"\"\" qr = QuantumRegister(2)", "np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]),", "(2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) op = Operator(mat,", "Operator(targ)) # op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1,", "Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor", "3)) self.assertRaises(QiskitError, op1._add, op2) def test_multiply(self): \"\"\"Test multiply method.\"\"\" mat", "\"\"\"Test multiply method.\"\"\" mat = self.rand_matrix(4, 4) val = np.exp(5j)", "self.UH)) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test", "(4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0,", "self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test compose", "= Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test", "for 
Operator linear operator class.\"\"\" def test_init_array_qubit(self): \"\"\"Test subsystem initialization", "[1, 1]]) target = np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal(", "1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0,", "import copy import numpy as np from numpy.testing import assert_allclose", "2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op @ op3([0, 1,", "self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 * mat)) if", "= np.eye(2) UX = np.array([[0, 1], [1, 0]]) UY =", "This code is part of Qiskit. # # (C) Copyright", "licensed under the Apache License, Version 2.0. You may #", "= np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]),", "self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def test_power(self): \"\"\"Test power method.\"\"\" X90", "targ) def test_compose_front(self): \"\"\"Test front compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX),", "op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of Controlled-u1 gate", "np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ)) #", "front compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX = np.dot(self.UY,", "= self.UX mat2 = np.eye(3, dtype=complex) mat21 = np.kron(mat2, mat1)", "2], output_dims=[2, 2]).dim, (4, 4)) def test_input_dims(self): \"\"\"Test Operator input_dims", "product of 1-qubit gates circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi", "\"\"\"Test adjoint method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati =", "(8,)) self.assertEqual(reshaped1.input_dims(), (8,)) 
self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def", "as la from qiskit import QiskitError from qiskit import QuantumRegister,", "self.assertEqual(op @ op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(np.kron(np.eye(2),", "rng = np.random.RandomState(seed) if cols is None: cols = rows", "circ.x(qr[1]) circ.measure(qr, cr) return circ class TestOperator(OperatorTestCase): \"\"\"Tests for Operator", "3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2 * 3", "return circ, target def simple_circuit_with_measure(self): \"\"\"Return a unitary circuit with", "dtype=complex)), Operator(mat)) mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self):", "op = Operator(mat) self.assertEqual(-op, Operator(-1 * mat)) def test_equiv(self): \"\"\"Test", "self.assertEqual(op * op1([0]), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat,", "1, 2])), Operator(targ)) self.assertEqual(op @ op3([0, 1, 2]), Operator(targ)) #", "op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True),", "of this license in the LICENSE.txt file in the root", "= Operator(matr + 1j * mati) uni_conj = op.conjugate() self.assertEqual(uni_conj,", "self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_tensor(self): \"\"\"Test tensor method.\"\"\"", "numpy.testing import assert_allclose import scipy.linalg as la from qiskit import", "0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test", "a circuit.\"\"\" gate = CXGate() op = Operator(gate).data target =", "2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op)", "= op.reshape(input_dims=[8], output_dims=[8]) 
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(),", "\"\"\"Test compose different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose,", "op2, Operator(mat1 + mat2)) self.assertEqual(op1 - op2, Operator(mat1 - mat2))", "front compose method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8)", "= la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1,", "op.__rmul__, op) def test_negate(self): \"\"\"Test negate method\"\"\" mat = self.rand_matrix(4,", "= ClassicalRegister(2) circ = QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr)", "np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem compose method.\"\"\"", "np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ)) # op1 qargs=[0]", "Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ, target def simple_circuit_with_measure(self): \"\"\"Return a", "np.eye(2) with self.subTest(\"Deep copy\"): orig = Operator(mat) cpy = orig.copy()", "op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)), mat)", "raises exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError,", "circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2, qr[2]) y90 = (1 /", "* op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ =", "= np.diag([1, 1, 1, np.exp(1j * lam)]) global_phase_equivalent = matrix_equal(", "http://www.apache.org/licenses/LICENSE-2.0. 
# # Any modifications or derivative works of this", "0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)", "Operator(self.rand_matrix(2 * 3 * 4, 4 * 5), input_dims=[4, 5],", "op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(np.kron(mat_a,", "self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1, targ) def test_dot(self): \"\"\"Test dot", "\"\"\"Test negate method\"\"\" mat = np.diag([1, np.exp(1j * np.pi /", "mat_b = self.rand_matrix(2, 2) mat_c = self.rand_matrix(2, 2) op =", "@ op2, targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2", "mat2 = self.rand_matrix(4, 4) op1 = Operator(mat1) op2 = Operator(mat2)", "= (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]]) target", "circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2, 2) op = Operator(circuit) y90", "= np.array([[1, 1], [1, -1]]) / np.sqrt(2) @classmethod def rand_rho(cls,", "np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data)", "real=False): \"\"\"Return a random matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix", "QuantumCircuit from qiskit.extensions.standard import HGate, CHGate, CXGate from qiskit.test import", "= Operator(self.rand_matrix(3, 3)) # Non-integer power raises error self.assertRaises(QiskitError, op.power,", "np.pi * np.array([[0, 1], [1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) #", "this code must retain this # copyright notice, and modified", "0], [0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1 =", "clone._data[0, 0] = 0.0 self.assertTrue(clone == orig) def test_is_unitary(self): \"\"\"Test", "# copyright notice, and modified files need to carry a", "np.array([[0, -1j], [1j, 0]]) UZ = np.diag([1, -1]) UH =", "def test_reshape(self): \"\"\"Test Operator reshape method.\"\"\" op = Operator(self.rand_matrix(8, 8))", 
"test_compose_front_subsystem(self): \"\"\"Test subsystem front compose method.\"\"\" # 3-qubit operator mat", "[0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1 = Operator(self.rand_matrix(4,", "of Controlled-u1 gate lam = np.pi / 4 circuit =", "import matrix_equal logger = logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for", "output_dims=[8]) reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2,", "self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,))", "= Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 *", "mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ)) # op1 qargs=[0] targ", "rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return a random matrix.\"\"\" seed =", "= np.pi / 4 circuit = QuantumCircuit(2) circuit.cu1(lam, 0, 1)", "1, 2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3,", "np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) # op1", "np.array([[0, 1], [1, 0]]) / 2) self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should", "1]), Operator(targ)) self.assertEqual(op @ op2([0, 1]), Operator(targ)) # op2 qargs=[2,", "@classmethod def rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return a random matrix.\"\"\"", "mat = self.rand_matrix(8, 8) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim,", "utils for Operator\"\"\" # Pauli-matrix unitaries UI = np.eye(2) UX", "np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op *", "have been altered from the originals. 
# pylint: disable=invalid-name \"\"\"Tests", "targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))", "1) op = Operator(circuit) target = np.diag([1, 1, 1, np.exp(1j", "qargs=[0, 1, 2] targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3,", "seed) rng = np.random.RandomState(seed) psi = rng.rand(n) + 1j *", "# This code is part of Qiskit. # # (C)", "= np.eye(3) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3))", "\"\"\"Tests for Operator matrix linear operator class.\"\"\" import unittest import", "qargs=[2, 1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.dot(op3,", "qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ)) # op1 qargs=[1] targ", "(4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))", "assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self): \"\"\"Test power method raises exceptions.\"\"\" op", "Test decomposition of controlled-H gate circuit = QuantumCircuit(2) circuit.ch(0, 1)", "circ class TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear operator class.\"\"\" def", "def test_init_array_except(self): \"\"\"Test initialization exception from array.\"\"\" mat = self.rand_matrix(4,", "'s') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op)", "np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op *", "@ op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem dot method.\"\"\" #", "+ 1j * mati) uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T -", "= matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def 
test_circuit_init_except(self): \"\"\"Test initialization from", "4 * 5) op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3,", "Test tensor product of 1-qubit gates circuit = QuantumCircuit(3) circuit.h(0)", "(4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\" op", "= Operator(self.UX) op2 = Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2),", "(4, 4)) def test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\" op =", "self.assertEqual(op.output_dims(qargs=[1]), (3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]),", "\"\"\"Test subsystem front compose method.\"\"\" # 3-qubit operator mat =", "8)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) op", "\"\"\"Test Operator representation string property.\"\"\" mat = self.rand_matrix(2, 2) op", "mat, input_dims=5) def test_init_operator(self): \"\"\"Test initialization from Operator.\"\"\" op1 =", "qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op", "with self.subTest(\"Shallow copy\"): orig = Operator(mat) clone = copy.copy(orig) clone._data[0,", "== orig) def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" # X-90 rotation", "qargs=[0, 1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0,", "4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2,", "from circuit with measure raises exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError,", "op1 = Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2))", "np.diag([0, 1])) 
global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def", "= self.rand_matrix(2, 2) mat_c = self.rand_matrix(2, 2) op = Operator(mat)", "def simple_circuit_no_measure(self): \"\"\"Return a unitary circuit and the corresponding unitary", "= self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4,", "assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1)) self.assertEqual(op12.dim,", "0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0],", "or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works", "\"\"\"Test __eq__ method\"\"\" mat = self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)),", "* lam)]) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) #", "np.kron(mat2, mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data)", "mat2)) self.assertEqual(op1 - op2, Operator(mat1 - mat2)) def test_add_except(self): \"\"\"Test", "circuit) def test_equal(self): \"\"\"Test __eq__ method\"\"\" mat = self.rand_matrix(2, 2,", "test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3", "= np.random.RandomState(seed) if cols is None: cols = rows if", "= Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1,", "op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(), (2,", "3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0,", 
"Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2))", "= op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T)) def test_compose_except(self):", "(2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2,", "self.assertEqual(op.output_dims(), (2, 3, 4)) def test_init_array_except(self): \"\"\"Test initialization exception from", "1], [1, 0]]) / 2) op = Operator(X90) self.assertEqual(op.power(2), Operator([[0,", "op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 =", "cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return circ class TestOperator(OperatorTestCase): \"\"\"Tests", "circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def test_equal(self): \"\"\"Test __eq__", "exception from array.\"\"\" mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat,", "Operator(mat) self.assertEqual(-op, Operator(-1 * mat)) def test_equiv(self): \"\"\"Test negate method\"\"\"", "QuantumRegister(3) circ = QuantumCircuit(qr) circ.h(qr[0]) circ.x(qr[1]) circ.ry(np.pi / 2, qr[2])", "self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))", "mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3 qargs=[0, 1,", "2]), Operator(targ)) self.assertEqual(op * op3([0, 1, 2]), Operator(targ)) # op3", "Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2),", "self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4)) def test_input_dims(self): \"\"\"Test", "\"\"\"Test Operator dim property.\"\"\" mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4,", "mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]), 
Operator(targ)) self.assertEqual(op @ op2([0, 1]),", "self.rand_matrix(8, 8) mat_a = self.rand_matrix(2, 2) mat_b = self.rand_matrix(2, 2)", "def test_multiply_except(self): \"\"\"Test multiply method raises exceptions.\"\"\" op = Operator(self.rand_matrix(2,", "1]), Operator(targ)) # op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a,", "targ) targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1,", "Operator(val * mat)) self.assertEqual(val * op, Operator(val * mat)) def", "targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op", "Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2,", "self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T)) def test_adjoint(self): \"\"\"Test adjoint", "op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0, 1] targ =", "qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ)) # op1 qargs=[2] targ", "= matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of", "CXGate() op = Operator(gate).data target = gate.to_matrix() global_phase_equivalent = matrix_equal(op,", "altered from the originals. 
# pylint: disable=invalid-name \"\"\"Tests for Operator", "2)) def test_init_array(self): \"\"\"Test initialization from array.\"\"\" mat = np.eye(3)", "pylint: disable=invalid-name \"\"\"Tests for Operator matrix linear operator class.\"\"\" import", "# Test decomposition of controlled-H gate circuit = QuantumCircuit(2) circuit.ch(0,", "op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4]) assert_allclose(op.data, mat)", "mati.T)) def test_adjoint(self): \"\"\"Test adjoint method.\"\"\" matr = self.rand_matrix(2, 4,", "Operator(self.rand_matrix(3, 3)) # Non-integer power raises error self.assertRaises(QiskitError, op.power, 0.5)", "= np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))", "method\"\"\" mat = self.rand_matrix(2, 2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat", "= np.kron(mat2, mat1) op21 = Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data,", "self.UH))) return circ, target def simple_circuit_with_measure(self): \"\"\"Return a unitary circuit", "test_dim(self): \"\"\"Test Operator dim property.\"\"\" mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim,", "import Operator from qiskit.quantum_info.operators.predicates import matrix_equal logger = logging.getLogger(__name__) class", "op = Operator(gate).data target = gate.to_matrix() global_phase_equivalent = matrix_equal(op, target,", "Operator(mat2).tensor(Operator(mat1)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2)", "4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self): \"\"\"Test Operator representation string property.\"\"\"", "np.diag([1, 0])) global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self):", "self.assertEqual(op._multiply(val), 
Operator(val * mat)) self.assertEqual(val * op, Operator(val * mat))", "Operator from qiskit.quantum_info.operators.predicates import matrix_equal logger = logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase):", "* op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat, np.kron(mat_a,", "self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr = self.rand_matrix(2,", "Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY,", "Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand method.\"\"\"", "qargs=[2, 1, 0] targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3,", "1])) + np.kron( np.eye(2), np.diag([1, 0])) global_phase_equivalent = matrix_equal(op, target,", "/ 4) op = Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase *", "orig = Operator(mat) cpy = orig.copy() cpy._data[0, 0] = 0.0", "# op3 qargs=[0, 1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b,", "Operator(np.kron(mat_c, np.kron(mat_b, mat_a))) # op3 qargs=[0, 1, 2] targ =", "self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand method.\"\"\" mat1 = self.UX", "exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit) def test_equal(self): \"\"\"Test", "method\"\"\" mat = np.diag([1, np.exp(1j * np.pi / 2)]) phase", "Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)),", "np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ)) #", "test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3", "qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op @ op3([2, 1, 0]), Operator(targ))", "CHGate() 
op = Operator(gate).data had = HGate().to_matrix() target = np.kron(had,", "decomposition of controlled-H gate circuit = QuantumCircuit(2) circuit.ch(0, 1) op", "2, real=True) self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat)) mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()),", "ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate() op = Operator(gate).data had =", "mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12", "gates circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2, 2)", "= Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat)) self.assertEqual(val * op, Operator(val", "targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2, targ)", "5) op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4]) assert_allclose(op.data,", "QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return circ class TestOperator(OperatorTestCase):", "1, 0]), Operator(targ)) self.assertEqual(op @ op3([2, 1, 0]), Operator(targ)) #", "Operator(mat) clone = copy.copy(orig) clone._data[0, 0] = 0.0 self.assertTrue(clone ==", "qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.dot(op2, qargs=[2,", "RandomState seeded with seed=%s\", seed) rng = np.random.RandomState(seed) psi =", "qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op * op3([2, 1, 0]), Operator(targ))", "qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2,", "input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0,", "(C) Copyright IBM 2017, 2019. 
# # This code is", "matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with seed=%s\",", "= self.rand_matrix(4, 4) self.assertEqual(Operator(mat.tolist()), Operator(mat)) def test_data(self): \"\"\"Test Operator representation", "4, real=True) mati = self.rand_matrix(2, 4, real=True) op = Operator(matr", "4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test Operator output_dims", "[1j, 0]]) UZ = np.diag([1, -1]) UH = np.array([[1, 1],", "= Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) reshaped2 = op.reshape(input_dims=[4,", "array.\"\"\" mat = np.eye(3) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim,", "-1]]) / np.sqrt(2) @classmethod def rand_rho(cls, n): \"\"\"Return random density", "qargs=[0, 1, 2], front=True), Operator(targ)) # op3 qargs=[2, 1, 0]", "qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))", "Operator output_dims method.\"\"\" op = Operator(self.rand_matrix(2 * 3 * 4,", "Operator, circuit) def test_equal(self): \"\"\"Test __eq__ method\"\"\" mat = self.rand_matrix(2,", "# op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0],", "from qiskit.extensions.standard import HGate, CHGate, CXGate from qiskit.test import QiskitTestCase", "* mat)) self.assertEqual(val * op, Operator(val * mat)) def test_multiply_except(self):", "np.diag([1, np.exp(1j * np.pi / 2)]) phase = np.exp(-1j *", "* 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4, 5))", "1])) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self):", "1, 1, np.exp(1j * lam)]) global_phase_equivalent = matrix_equal( op.data, target,", "mat, 
input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5) def test_init_operator(self): \"\"\"Test", "op3 qargs=[2, 1, 0] targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)", "* 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.output_dims(), (2, 3,", "from N-qubit array.\"\"\" # Test automatic inference of qubit subsystems", "def test_to_operator(self): \"\"\"Test to_operator method.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2", "method.\"\"\" op1 = Operator(self.rand_matrix(4, 4)) op2 = op1.to_operator() self.assertEqual(op1, op2)", "targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b))) self.assertEqual(op.compose(op2, qargs=[2, 0], front=True),", "method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2, 4,", "self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat,", "6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat2).expand(Operator(mat1))", "method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8) mat_a =", "np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op * op1([0]), Operator(targ))", "targ) self.assertEqual(op2 @ op1, targ) def test_dot(self): \"\"\"Test dot method.\"\"\"", "exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test", "test_compose_except(self): \"\"\"Test compose different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError,", "y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])", "def test_compose_except(self): \"\"\"Test compose different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3)))", "targ = np.dot(mat, 
np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1, 2]),", "self.assertEqual(uni_conj, Operator(matr - 1j * mati)) def test_transpose(self): \"\"\"Test transpose", "Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(), (3,))", "dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self):", "method.\"\"\" mat1 = self.UX mat2 = np.eye(3, dtype=complex) mat21 =", "copy\"): orig = Operator(mat) clone = copy.copy(orig) clone._data[0, 0] =", "def test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\" op = Operator(self.rand_matrix(2 *", "self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) self.assertEqual(op1 + op2, Operator(mat1 + mat2))", "def test_dot_subsystem(self): \"\"\"Test subsystem dot method.\"\"\" # 3-qubit operator mat", "test_power_except(self): \"\"\"Test power method raises exceptions.\"\"\" op = Operator(self.rand_matrix(3, 3))", "circ.measure(qr, cr) return circ class TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear", "qubit subsystems mat = self.rand_matrix(8, 8) op = Operator(mat) assert_allclose(op.data,", "np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op", "mat = self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim,", "4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2, 3, 4)) def test_init_array_except(self):", "Operator copy method\"\"\" mat = np.eye(2) with self.subTest(\"Deep copy\"): orig", "mat)) def test_multiply_except(self): \"\"\"Test multiply method raises exceptions.\"\"\" op =", "measurement.\"\"\" qr = QuantumRegister(2) cr = ClassicalRegister(2) circ = 
QuantumCircuit(qr,", "self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply, op) self.assertRaises(QiskitError, op.__rmul__, op) def", "np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) # op1 qargs=[1]", "self.rand_matrix(4, 4) mat2 = self.rand_matrix(4, 4) op1 = Operator(mat1) op2", "test_circuit_init_except(self): \"\"\"Test initialization from circuit with measure raises exception.\"\"\" circuit", "/ 2)]) phase = np.exp(-1j * np.pi / 4) op", "= self.rand_matrix(4, 4) val = np.exp(5j) op = Operator(mat) self.assertEqual(op._multiply(val),", "test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati", "self.assertEqual(op.input_dims(qargs=[1]), (5,)) def test_output_dims(self): \"\"\"Test Operator output_dims method.\"\"\" op =", "mat = np.diag([1, np.exp(1j * np.pi / 2)]) phase =", "self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ)) # op2 qargs=[2, 0] targ", "target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization from a circuit.\"\"\"", "targ) self.assertEqual(op2 * op1, targ) def test_compose_front(self): \"\"\"Test front compose", "targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0],", "np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with seed=%s\", seed) rng =", "4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4,", "Operator, mat, input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5) def test_init_operator(self):", "= np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True),", "np.kron(np.eye(4), mat_a)) self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ)) # op1 qargs=[1] targ", 
"np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand method.\"\"\" mat1 =", "self.assertEqual(op * op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(mat,", "self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op", "self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4)) self.assertEqual(op.input_dims(),", "output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))", "power raises error self.assertRaises(QiskitError, op.power, 0.5) def test_add(self): \"\"\"Test add", "should return false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary()) def test_to_operator(self): \"\"\"Test", "= self.rand_matrix(4, 4) op = Operator(mat) self.assertEqual(-op, Operator(-1 * mat))", "= np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho RandomState seeded with seed=%s\", seed) rng", "def test_equiv(self): \"\"\"Test negate method\"\"\" mat = np.diag([1, np.exp(1j *", "# Test tensor product of 1-qubit gates circuit = QuantumCircuit(3)", "front=True), Operator(targ)) # op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a,", "op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data) def test_power_except(self):", "= np.dot(mat, np.kron(mat_a, np.eye(4))) self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ)) def test_power(self):", "* 4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4])", "tensor product of 1-qubit gates circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1)", "+ np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent = matrix_equal( op.data, target,", "self.assertEqual(op * op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ =", "1j * rng.rand(rows, cols) def simple_circuit_no_measure(self): \"\"\"Return a unitary circuit", "1, 2]), Operator(targ)) 
# op3 qargs=[2, 1, 0] targ =", "np.kron(mat_b, mat_c))) self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ)) # op2", "self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ)) # op1 qargs=[2]", "= Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a)) op3 = Operator(np.kron(mat_c, np.kron(mat_b,", "5)) self.assertEqual(op.output_dims(), (2, 3, 4)) def test_init_array_except(self): \"\"\"Test initialization exception", "self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1 * op2, targ) targ = Operator(np.dot(self.UX, self.UY))", "np.array([[1, -1], [1, 1]]) target = Operator(np.kron(y90, np.kron(self.UX, self.UH))) return", "# # This code is licensed under the Apache License,", "test_multiply(self): \"\"\"Test multiply method.\"\"\" mat = self.rand_matrix(4, 4) val =", "np.dot(self.UY, self.UX) self.assertEqual(opYX, Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY =", "with seed=%s\", seed) rng = np.random.RandomState(seed) if cols is None:", "test_circuit_init(self): \"\"\"Test initialization from a circuit.\"\"\" # Test tensor product", "target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate() op = Operator(gate).data had", "qr[2]) y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1,", "target = Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ, target def simple_circuit_with_measure(self):", "np.diag([1, -1]) UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)", "= Operator(mat) op1 = Operator(mat_a) op2 = Operator(np.kron(mat_b, mat_a)) op3", "= self.rand_matrix(8, 8) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8,", "op2) def test_circuit_init(self): \"\"\"Test initialization from a circuit.\"\"\" # Test", "(3,)) self.assertEqual(op.output_dims(), (3,)) mat = self.rand_matrix(2 * 3 * 4,", "# op2 qargs=[2, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))", "np.kron( np.eye(2), 
np.diag([1, 0])) global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent)", "2, 2)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,))", "op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr =", "mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])),", "a unitary circuit with measurement.\"\"\" qr = QuantumRegister(2) cr =", "Operator(targ)) # op3 qargs=[2, 1, 0] targ = np.dot(mat, np.kron(mat_a,", "the root directory # of this source tree or at", "op = Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8))", "adjoint method.\"\"\" matr = self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2,", "qargs=[2, 0]), Operator(targ)) self.assertEqual(op @ op2([2, 0]), Operator(targ)) # op1", "1] targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op2, qargs=[0, 1]),", "\"\"\"Return a unitary circuit and the corresponding unitary array.\"\"\" qr", "= self.rand_matrix(2, 4, real=True) op = Operator(matr + 1j *", "op = Operator(mat) self.assertEqual(op._multiply(val), Operator(val * mat)) self.assertEqual(val * op,", "* 0.5 * np.pi * np.array([[0, 1], [1, 0]]) /", "X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1],", "matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of controlled-H", "matr = self.rand_matrix(2, 4, real=True) mati = self.rand_matrix(2, 4, real=True)", "* np.array([[1, -1], [1, 1]]) target = np.kron(y90, np.kron(self.UX, self.UH))", "self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self):", "1, 2] targ = np.dot(np.kron(mat_c, 
np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0,", "np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) #", "np.kron(mat_b, mat_a))) # op3 qargs=[0, 1, 2] targ = np.dot(mat,", "# Pauli-matrix unitaries UI = np.eye(2) UX = np.array([[0, 1],", "* 3 * 4, 4 * 5) op = Operator(mat,", "self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2, targ) targ = Operator(np.dot(self.UX, self.UY))", "= np.diag([1, -1]) UH = np.array([[1, 1], [1, -1]]) /", "= Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(), (3,)) self.assertEqual(op.output_dims(),", "= np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat) self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ)) self.assertEqual(op", "- mat2)) def test_add_except(self): \"\"\"Test add method raises exceptions.\"\"\" op1", "0])) global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_circuit_init_except(self): \"\"\"Test", "@ op1, targ) def test_dot(self): \"\"\"Test dot method.\"\"\" op1 =", "front=True) matXY = np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test", "circ.x(qr[1]) circ.ry(np.pi / 2, qr[2]) y90 = (1 / np.sqrt(2))", "(8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2))", "Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test compose method.\"\"\" op1 = Operator(self.UX)", "np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ)) self.assertEqual(op * op3([0,", "test_compose_front(self): \"\"\"Test front compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX", "4) op = Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase * mat)))", "target def 
simple_circuit_with_measure(self): \"\"\"Return a unitary circuit with measurement.\"\"\" qr", "string property.\"\"\" mat = self.rand_matrix(2, 2) op = Operator(mat) assert_allclose(mat,", "lam)]) global_phase_equivalent = matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test", "= Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(),", "def test_tensor(self): \"\"\"Test tensor method.\"\"\" mat1 = self.UX mat2 =", "seeded with seed=%s\", seed) rng = np.random.RandomState(seed) psi = rng.rand(n)", "= Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2, targ) targ", "self.assertEqual(op1 + op2, Operator(mat1 + mat2)) self.assertEqual(op1 - op2, Operator(mat1", "2) mat_c = self.rand_matrix(2, 2) op = Operator(mat) op1 =", "\"\"\"Test add method.\"\"\" mat1 = self.rand_matrix(4, 4) mat2 = self.rand_matrix(4,", "+ 1j * mati.T)) def test_adjoint(self): \"\"\"Test adjoint method.\"\"\" matr", "qargs=[2]), Operator(targ)) self.assertEqual(op * op1([2]), Operator(targ)) def test_compose_front_subsystem(self): \"\"\"Test subsystem", "(4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2)) def", "self.assertEqual(op21.dim, (6, 6)) assert_allclose(op21.data, Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12", "is licensed under the Apache License, Version 2.0. 
You may", "@ op1([1]), Operator(targ)) # op1 qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)),", "self.assertRaises(QiskitError, op.__rmul__, op) def test_negate(self): \"\"\"Test negate method\"\"\" mat =", "4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]),", "def test_equal(self): \"\"\"Test __eq__ method\"\"\" mat = self.rand_matrix(2, 2, real=True)", "op1 qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))", "* np.pi * np.array([[0, 1], [1, 0]]) / 2) op", "def simple_circuit_with_measure(self): \"\"\"Return a unitary circuit with measurement.\"\"\" qr =", "circ.ry(np.pi / 2, qr[2]) y90 = (1 / np.sqrt(2)) *", "self.assertEqual(-op, Operator(-1 * mat)) def test_equiv(self): \"\"\"Test negate method\"\"\" mat", "= self.rand_matrix(2 * 3 * 4, 4 * 5) op", "= np.random.RandomState(seed) psi = rng.rand(n) + 1j * rng.rand(n) rho", "3, 4]) self.assertEqual(op.output_dims(), (2, 3, 4)) self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2,", "\"\"\"Test initialization from a circuit.\"\"\" gate = CXGate() op =", "logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\" # Pauli-matrix unitaries", "input_dims=[4, 5], output_dims=[2, 3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 *", "= np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) #", "(2, 3, 4)) self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2)) self.assertEqual(op.output_dims(qargs=[2,", "qargs=[1]), Operator(targ)) self.assertEqual(op * op1([1]), Operator(targ)) # op1 qargs=[2] targ", "cpy._data[0, 0] = 0.0 self.assertFalse(cpy == orig) with self.subTest(\"Shallow copy\"):", "op = Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2", 
"<reponame>EnriqueL8/qiskit-terra<filename>test/python/quantum_info/operators/test_operator.py # -*- coding: utf-8 -*- # This code is", "= np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat) self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))", "# that they have been altered from the originals. #", "np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ)) #", "is None: cols = rows if real: return rng.rand(rows, cols)", "Operator(targ)) # op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))", "ClassicalRegister(2) circ = QuantumCircuit(qr, cr) circ.h(qr[0]) circ.x(qr[1]) circ.measure(qr, cr) return", "np.array([[1, 1], [1, -1]]) / np.sqrt(2) @classmethod def rand_rho(cls, n):", "2) op = Operator(X90) self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4),", "logger = logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\" #", "= Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.compose(op1), targ) self.assertEqual(op2 @ op1, targ) def", "* op, Operator(val * mat)) def test_multiply_except(self): \"\"\"Test multiply method", "self.assertTrue(Operator(X90).is_unitary()) # Non-unitary should return false self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())", "* op2([2, 0]), Operator(targ)) # op1 qargs=[0] targ = np.dot(mat,", "def test_circuit_init_except(self): \"\"\"Test initialization from circuit with measure raises exception.\"\"\"", "0]), Operator(targ)) self.assertEqual(op @ op2([2, 0]), Operator(targ)) # op1 qargs=[0]", "4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4]) self.assertEqual(op.input_dims(),", "\"\"\"Test utils for Operator\"\"\" # Pauli-matrix unitaries UI = np.eye(2)", "= np.array([[0, -1j], [1j, 0]]) UZ = np.diag([1, -1]) UH", "np.kron(mat_b, mat_a))) # op3 qargs=[0, 1, 2] targ = np.dot(np.kron(mat_c,", "8)) 
self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) def", "self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1, targ) def test_compose_front(self): \"\"\"Test", "import unittest import logging import copy import numpy as np", "def test_multiply(self): \"\"\"Test multiply method.\"\"\" mat = self.rand_matrix(4, 4) val", "op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's')", "indicating # that they have been altered from the originals.", "of 1-qubit gates circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi /", "[1, 1]]) target = Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ, target", "self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2, targ) targ = Operator(np.dot(self.UX,", "def test_add(self): \"\"\"Test add method.\"\"\" mat1 = self.rand_matrix(4, 4) mat2", "assert_allclose(mat, op.data) def test_dim(self): \"\"\"Test Operator dim property.\"\"\" mat =", "from numpy.testing import assert_allclose import scipy.linalg as la from qiskit", "op = Operator(self.rand_matrix(3, 3)) # Non-integer power raises error self.assertRaises(QiskitError,", "# X-90 rotation X90 = la.expm(-1j * 0.5 * np.pi", "QiskitTestCase from qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates import matrix_equal logger", "1]]) target = Operator(np.kron(y90, np.kron(self.UX, self.UH))) return circ, target def", "2) def test_compose(self): \"\"\"Test compose method.\"\"\" op1 = Operator(self.UX) op2", "+ mat2)) self.assertEqual(op1 - op2, Operator(mat1 - mat2)) def test_add_except(self):", "subsystems mat = self.rand_matrix(8, 8) op = Operator(mat) assert_allclose(op.data, mat)", "= rows if real: return rng.rand(rows, cols) return rng.rand(rows, cols)", "Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) 
self.assertEqual(op1 +", "mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 * mat)) if __name__ ==", "test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" # X-90 rotation X90 = la.expm(-1j", "had = HGate().to_matrix() target = np.kron(had, np.diag([0, 1])) + np.kron(", "0]), Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2),", "Operator(targ)) # op2 qargs=[0, 1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b,", "initialization exception from array.\"\"\" mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator,", "qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))", "multiply method raises exceptions.\"\"\" op = Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply,", "np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6)) assert_allclose(op12.data, Operator(mat12).data)", "cr) return circ class TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear operator", "2)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(),", "np.array([[0, 1], [1, 0]]) / 2) op = Operator(X90) self.assertEqual(op.power(2),", "from qiskit import QiskitError from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit", "(4, 2)) def test_copy(self): \"\"\"Test Operator copy method\"\"\" mat =", "2) op = Operator(circuit) y90 = (1 / np.sqrt(2)) *", "np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) # op1 qargs=[2] targ", "mat) self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))", "0]), Operator(targ)) # op1 qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a), mat)", "from a circuit.\"\"\" # Test tensor product of 1-qubit gates", "Operator(np.dot(self.UY, self.UX)) 
self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @ op2, targ) targ =", "np.pi * np.array([[0, 1], [1, 0]]) / 2) op =", "output_dims=[2, 3, 4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 * 5, 2", "(4, 2)) def test_reshape(self): \"\"\"Test Operator reshape method.\"\"\" op =", "targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ)) self.assertEqual(op *", "mat) self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op @ op3([2, 1,", "* op3([0, 1, 2]), Operator(targ)) # op3 qargs=[2, 1, 0]", "Operator(mat) self.assertTrue(op.equiv(phase * mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 * mat))", "op = Operator(matr + 1j * mati) uni_conj = op.conjugate()", "2) mat_b = self.rand_matrix(2, 2) mat_c = self.rand_matrix(2, 2) op", "2, 2)) self.assertEqual(op.output_dims(), (2, 2, 2)) def test_init_array(self): \"\"\"Test initialization", "np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))", "(3,)) self.assertEqual(op.output_dims(qargs=[2]), (4,)) self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4)) self.assertEqual(op.output_dims(qargs=[2, 0]), (4,", "8) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(),", "subsystem compose method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8)", "0.5) def test_add(self): \"\"\"Test add method.\"\"\" mat1 = self.rand_matrix(4, 4)", "n): \"\"\"Return random density matrix\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_rho", "gate.to_matrix() global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate()", "input_dims=[2, 4]) self.assertRaises(QiskitError, Operator, mat, input_dims=5) def test_init_operator(self): \"\"\"Test initialization", 
"self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(), (2, 3, 4)) def test_init_array_except(self): \"\"\"Test", "notice, and modified files need to carry a notice indicating", "random matrix.\"\"\" seed = np.random.randint(0, np.iinfo(np.int32).max) logger.debug(\"rand_matrix RandomState seeded with", "Operator(mat)) def test_data(self): \"\"\"Test Operator representation string property.\"\"\" mat =", "compose method.\"\"\" # 3-qubit operator mat = self.rand_matrix(8, 8) mat_a", "obtain a copy of this license in the LICENSE.txt file", "5, 2 * 3 * 4)) self.assertEqual(op.input_dims(), (4, 5)) self.assertEqual(op.output_dims(),", "op = Operator(matr + 1j * mati) uni_t = op.transpose()", "self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ)) self.assertEqual(op * op2([0, 1]), Operator(targ)) #", "mat_a))) # op3 qargs=[0, 1, 2] targ = np.dot(mat, np.kron(mat_c,", "2, 2)) op = Operator(mat, input_dims=8, output_dims=8) assert_allclose(op.data, mat) self.assertEqual(op.dim,", "UI = np.eye(2) UX = np.array([[0, 1], [1, 0]]) UY", "qargs=[0, 1], front=True), Operator(targ)) # op2 qargs=[2, 0] targ =", "self.rand_matrix(4, 4) op1 = Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1", "4) op = Operator(mat) self.assertEqual(-op, Operator(-1 * mat)) def test_equiv(self):", "4]) assert_allclose(op.data, mat) self.assertEqual(op.dim, (4 * 5, 2 * 3", "= HGate().to_matrix() target = np.kron(had, np.diag([0, 1])) + np.kron( np.eye(2),", "op1 = Operator(self.UX) op2 = Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX))", "= matrix_equal( op.data, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) def test_instruction_init(self): \"\"\"Test initialization", "self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @ op1([1]), Operator(targ)) # op1 qargs=[2]", "mat12 = np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6, 6))", 
"reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4]) self.assertEqual(op.output_dims(), (2, 2, 2))", "def rand_rho(cls, n): \"\"\"Return random density matrix\"\"\" seed = np.random.randint(0,", "dtype=complex) mat21 = np.kron(mat2, mat1) op21 = Operator(mat1).expand(Operator(mat2)) self.assertEqual(op21.dim, (6,", "reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])", "method.\"\"\" op = Operator(self.rand_matrix(2 * 3 * 4, 4 *", "mat1 = self.UX mat2 = np.eye(3, dtype=complex) mat21 = np.kron(mat2,", "qargs=[0, 1]), Operator(targ)) self.assertEqual(op @ op2([0, 1]), Operator(targ)) # op2", "1, 2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0,", "= Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) self.assertEqual(op1 + op2, Operator(mat1", "is part of Qiskit. # # (C) Copyright IBM 2017,", "matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent) gate = CHGate() op = Operator(gate).data", "test_power(self): \"\"\"Test power method.\"\"\" X90 = la.expm(-1j * 0.5 *", "Operator(self.UY) op2 = Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ)", "qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True),", "1j * mati)) def test_transpose(self): \"\"\"Test transpose method.\"\"\" matr =", "with measure raises exception.\"\"\" circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Operator, circuit)", "op2) def test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr = self.rand_matrix(2, 4,", "1j * mati) uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j", "- 1j * mati.T)) def test_compose_except(self): \"\"\"Test compose different dimension", "Operator(self.rand_matrix(8, 8)) reshaped1 = op.reshape(input_dims=[8], output_dims=[8]) 
reshaped2 = op.reshape(input_dims=[4, 2],", "np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ)) # op3", "assert_allclose(op.data, mat) self.assertEqual(op.dim, (8, 8)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(op.output_dims(),", "qiskit.quantum_info.operators.operator import Operator from qiskit.quantum_info.operators.predicates import matrix_equal logger = logging.getLogger(__name__)", "= Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1 @", "* np.pi / 4) op = Operator(mat) self.assertTrue(op.equiv(phase * mat))", "Operator(targ)) self.assertEqual(op @ op2([0, 1]), Operator(targ)) # op2 qargs=[2, 0]", "OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\" # Pauli-matrix unitaries UI =", "4, 4 * 5) op = Operator(mat, input_dims=[4, 5], output_dims=[2,", "def test_conjugate(self): \"\"\"Test conjugate method.\"\"\" matr = self.rand_matrix(2, 4, real=True)", "ignore_phase=True) self.assertTrue(global_phase_equivalent) # Test decomposition of Controlled-u1 gate lam =", "op1 = Operator(self.rand_matrix(2, 2)) op2 = Operator(self.rand_matrix(3, 3)) self.assertRaises(QiskitError, op1._add,", "np.exp(1j * np.pi / 2)]) phase = np.exp(-1j * np.pi", "\"\"\"Test initialization from circuit with measure raises exception.\"\"\" circuit =", "TestOperator(OperatorTestCase): \"\"\"Tests for Operator linear operator class.\"\"\" def test_init_array_qubit(self): \"\"\"Test", "\"\"\"Test is_unitary method.\"\"\" # X-90 rotation X90 = la.expm(-1j *", "Operator(targ)) # op1 qargs=[2] targ = np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1,", "UY = np.array([[0, -1j], [1j, 0]]) UZ = np.diag([1, -1])", "op2 = op1.to_operator() self.assertEqual(op1, op2) def test_conjugate(self): \"\"\"Test conjugate method.\"\"\"", "= np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, 
(6, 6)) assert_allclose(op12.data,", "Operator(self.rand_matrix(2, 2)) self.assertRaises(QiskitError, op._multiply, 's') self.assertRaises(QiskitError, op.__rmul__, 's') self.assertRaises(QiskitError, op._multiply,", "1, 2]), Operator(targ)) self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ)) self.assertEqual(op @ op3([0,", "* mat)) self.assertTrue(op.equiv(Operator(phase * mat))) self.assertFalse(op.equiv(2 * mat)) if __name__", "np.array([[1, -1], [1, 1]]) target = np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent", "uni_adj = op.adjoint() self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T)) def", "import assert_allclose import scipy.linalg as la from qiskit import QiskitError", "method.\"\"\" mat = self.rand_matrix(4, 4) val = np.exp(5j) op =", "self.assertEqual(op @ op3([2, 1, 0]), Operator(targ)) # op2 qargs=[0, 1]", "phase = np.exp(-1j * np.pi / 4) op = Operator(mat)", "np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op @ op1([0]), Operator(targ))", "self.rand_matrix(2, 2) op = Operator(mat) op1 = Operator(mat_a) op2 =", "op1 qargs=[0] targ = np.dot(mat, np.kron(np.eye(4), mat_a)) self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))", "\"\"\"Test compose method.\"\"\" op1 = Operator(self.UX) op2 = Operator(self.UY) targ", "target = np.kron(y90, np.kron(self.UX, self.UH)) global_phase_equivalent = matrix_equal( op.data, target,", "matrix_equal logger = logging.getLogger(__name__) class OperatorTestCase(QiskitTestCase): \"\"\"Test utils for Operator\"\"\"", "mat_c))) self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ)) self.assertEqual(op * op3([2, 1,", "2)) self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3)) self.assertEqual(op.output_dims(qargs=[0]), (2,)) self.assertEqual(op.output_dims(qargs=[1]),", "= np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ)) self.assertEqual(op *", 
"Operator(matr.T - 1j * mati.T)) def test_compose_except(self): \"\"\"Test compose different", "self.rand_matrix(2 * 3 * 4, 4 * 5) op =", "# This code is licensed under the Apache License, Version", "op3 qargs=[2, 1, 0] targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))", "this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications", "Operator(mat21).data) mat12 = np.kron(mat1, mat2) op12 = Operator(mat1).tensor(Operator(mat2)) self.assertEqual(op12.dim, (6,", "self.rand_matrix(4, 4) self.assertEqual(Operator(mat).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))", "mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ)) # op3 qargs=[2,", "targ = Operator(np.dot(self.UX, self.UY)) self.assertEqual(op2.dot(op1), targ) self.assertEqual(op2 * op1, targ)", "uni_t = op.transpose() self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T)) def", "(8,)) self.assertEqual(reshaped2.output_dims(), (2, 4)) self.assertEqual(reshaped2.input_dims(), (4, 2)) def test_copy(self): \"\"\"Test", "(2, 2, 2)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(),", "np.kron(self.UI, np.diag([1, 0])) + np.kron( self.UH, np.diag([0, 1])) global_phase_equivalent =", "= np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat) self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ)) self.assertEqual(op @", "targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))", "= Operator(mat1) op2 = Operator(mat2) self.assertEqual(op1._add(op2), Operator(mat1 + mat2)) self.assertEqual(op1", "qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ)) self.assertEqual(op", "seed=%s\", seed) rng = np.random.RandomState(seed) if cols is None: cols", "mat2 = np.eye(3, dtype=complex) 
mat21 = np.kron(mat2, mat1) op21 =", "4]) self.assertEqual(op.output_dims(), (2, 2, 2)) self.assertEqual(op.input_dims(), (2, 2, 2)) self.assertEqual(reshaped1.output_dims(),", "op1 = Operator(self.rand_matrix(4, 4)) op2 = Operator(op1) self.assertEqual(op1, op2) def", "rng.rand(rows, cols) return rng.rand(rows, cols) + 1j * rng.rand(rows, cols)", "self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test", "Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem dot method.\"\"\" # 3-qubit operator", "\"\"\"Test initialization exception from array.\"\"\" mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError,", "1-qubit gates circuit = QuantumCircuit(3) circuit.h(0) circuit.x(1) circuit.ry(np.pi / 2,", "code is licensed under the Apache License, Version 2.0. You", "Operator(circuit) y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1,", "(2, 2, 2)) self.assertEqual(reshaped1.output_dims(), (8,)) self.assertEqual(reshaped1.input_dims(), (8,)) self.assertEqual(reshaped2.output_dims(), (2, 4))", "np.trace(rho) return rho @classmethod def rand_matrix(cls, rows, cols=None, real=False): \"\"\"Return", "= Operator(circuit) y90 = (1 / np.sqrt(2)) * np.array([[1, -1],", "np.eye(3) op = Operator(mat) assert_allclose(op.data, mat) self.assertEqual(op.dim, (3, 3)) self.assertEqual(op.input_dims(),", "2, 2)) def test_init_array(self): \"\"\"Test initialization from array.\"\"\" mat =", "from array.\"\"\" mat = self.rand_matrix(4, 4) self.assertRaises(QiskitError, Operator, mat, input_dims=[4,", "self.UX) self.assertEqual(opYX, Operator(matYX)) opXY = Operator(self.UX).compose(Operator(self.UY), front=True) matXY = np.dot(self.UX,", "# op2 qargs=[2, 0] targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)", "X-90 rotation X90 = la.expm(-1j * 0.5 * np.pi *", "self.assertEqual(op @ op1([2]), Operator(targ)) def test_dot_subsystem(self): \"\"\"Test subsystem dot 
method.\"\"\"", "Operator(gate).data target = gate.to_matrix() global_phase_equivalent = matrix_equal(op, target, ignore_phase=True) self.assertTrue(global_phase_equivalent)", "orig) def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\" # X-90 rotation X90", "0] targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat) self.assertEqual(op.compose(op3, qargs=[2, 1,", "# pylint: disable=invalid-name \"\"\"Tests for Operator matrix linear operator class.\"\"\"", "initialization from a circuit.\"\"\" gate = CXGate() op = Operator(gate).data", "# op3 qargs=[2, 1, 0] targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)),", "self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8), Operator(np.eye(2))) def test_expand(self): \"\"\"Test expand", "output_dims=[2, 2]).dim, (4, 4)) def test_input_dims(self): \"\"\"Test Operator input_dims method.\"\"\"", "Operator(np.eye(2)).compose, Operator(np.eye(3))) self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2) def test_compose(self): \"\"\"Test compose method.\"\"\"", "= np.dot(self.UX, self.UY) self.assertEqual(opXY, Operator(matXY)) def test_compose_subsystem(self): \"\"\"Test subsystem compose", "mati.T)) def test_compose_except(self): \"\"\"Test compose different dimension exception\"\"\" self.assertRaises(QiskitError, Operator(np.eye(2)).compose,", "op2 = Operator(self.UY) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.compose(op2), targ) self.assertEqual(op1", "(4, 5)) self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4)) self.assertEqual(op.input_dims(qargs=[0]), (4,)) self.assertEqual(op.input_dims(qargs=[1]), (5,))", "2] targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a))) self.assertEqual(op.compose(op3, qargs=[0, 1,", "\"\"\"Test dot method.\"\"\" op1 = Operator(self.UY) op2 = Operator(self.UX) targ", "input_dims=[4], output_dims=[4]).dim, (4, 4)) self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4,", "this # copyright notice, 
and modified files need to carry", "1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a))) self.assertEqual(op.dot(op2, qargs=[0, 1]),", "= 0.0 self.assertTrue(clone == orig) def test_is_unitary(self): \"\"\"Test is_unitary method.\"\"\"", "0]), Operator(targ)) self.assertEqual(op @ op3([2, 1, 0]), Operator(targ)) # op2", "from a circuit.\"\"\" gate = CXGate() op = Operator(gate).data target", "# op1 qargs=[0] targ = np.dot(np.kron(np.eye(4), mat_a), mat) self.assertEqual(op.compose(op1, qargs=[0]),", "np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ)) # op1 qargs=[2]", "op1 qargs=[1] targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2)))) self.assertEqual(op.dot(op1, qargs=[1]),", "self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]])) self.assertEqual(op.power(4), Operator(-1 * np.eye(2))) self.assertEqual(op.power(8),", "self.assertEqual(op1 - op2, Operator(mat1 - mat2)) def test_add_except(self): \"\"\"Test add", "op2 = Operator(self.UX) targ = Operator(np.dot(self.UY, self.UX)) self.assertEqual(op1.dot(op2), targ) self.assertEqual(op1", "\"\"\"Test front compose method.\"\"\" opYX = Operator(self.UY).compose(Operator(self.UX), front=True) matYX =", "np.dot(np.kron(mat_a, np.eye(4)), mat) self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ)) self.assertEqual(op @ op1([2]), Operator(targ))" ]
[ "import By class feature_modal: title_textbox = (By.ID, \"feature-name\") description_textbox =", "feature_modal: title_textbox = (By.ID, \"feature-name\") description_textbox = (By.ID, \"description\") save_button", "(By.ID, \"feature-name\") description_textbox = (By.ID, \"description\") save_button = (By.XPATH, \"/html/body/app/div[3]/div[2]/div/div/div/button[1]\")", "= (By.ID, \"feature-name\") description_textbox = (By.ID, \"description\") save_button = (By.XPATH,", "By class feature_modal: title_textbox = (By.ID, \"feature-name\") description_textbox = (By.ID,", "from selenium.webdriver.common.by import By class feature_modal: title_textbox = (By.ID, \"feature-name\")", "class feature_modal: title_textbox = (By.ID, \"feature-name\") description_textbox = (By.ID, \"description\")", "<gh_stars>0 from selenium.webdriver.common.by import By class feature_modal: title_textbox = (By.ID,", "selenium.webdriver.common.by import By class feature_modal: title_textbox = (By.ID, \"feature-name\") description_textbox", "title_textbox = (By.ID, \"feature-name\") description_textbox = (By.ID, \"description\") save_button =" ]
[ "= stations2[u'properties'] #extract ID so can be use in link", "data. Need to get to timecourse ID station_prop = data2", "except URLError, e: print 'error:', e stations= json.loads (data) #extract", "urlopen, URLError import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response =", "can be use in link ID = properties[u'id'] #print ID", "= Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request) data = response.read() except", "(data) #extract out station 2 stations2 = stations [7] properties", "stations2[u'properties'] #extract ID so can be use in link ID", "('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url) try: response = urlopen(request2) data2", "dictionary so need to extract as a key a= station_prop_json[u'properties'][u'timeseries'].keys()", "Request (url) try: response = urlopen(request2) data2 = response.read() except", "#extract out station 2 stations2 = stations [7] properties =", "print 'error:', e #contains station properties data. Need to get", "data2 station_prop_json= json.loads (station_prop) #ID is a key in dictionary", "data2 = response.read() except URLError, e: print 'error:', e #contains", "a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 =", "= ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url) try: response = urlopen(request2)", "station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2) try: response", "ID so can be use in link ID = properties[u'id']", "station properties data. 
Need to get to timecourse ID station_prop", "#extract ID so can be use in link ID =", "from urllib2 import Request, urlopen, URLError import json request =", "so can be use in link ID = properties[u'id'] #print", "#contains station properties data. Need to get to timecourse ID", "is a key in dictionary so need to extract as", "data3 = response.read() except URLError, e: print 'error:', e print", "be use in link ID = properties[u'id'] #print ID url", "in dictionary so need to extract as a key a=", "in link ID = properties[u'id'] #print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID))", "station_prop = data2 station_prop_json= json.loads (station_prop) #ID is a key", "a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2) try:", "Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request) data = response.read() except URLError,", "properties = stations2[u'properties'] #extract ID so can be use in", "+'/getData') request3 = Request(url2) try: response = urlopen(request3) data3 =", "response = urlopen(request) data = response.read() except URLError, e: print", "= data2 station_prop_json= json.loads (station_prop) #ID is a key in", "#ID is a key in dictionary so need to extract", "use in link ID = properties[u'id'] #print ID url =", "ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url) try: response", "url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url) try: response =", "except URLError, e: print 'error:', e #contains station properties data.", "= urlopen(request2) data2 = response.read() except URLError, e: print 'error:',", "as a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') 
request3", "try: response = urlopen(request3) data3 = response.read() except URLError, e:", "e #contains station properties data. Need to get to timecourse", "urlopen(request) data = response.read() except URLError, e: print 'error:', e", "= urlopen(request3) data3 = response.read() except URLError, e: print 'error:',", "import Request, urlopen, URLError import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try:", "URLError import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request)", "= properties[u'id'] #print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request", "url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2) try: response = urlopen(request3)", "= response.read() except URLError, e: print 'error:', e stations= json.loads", "key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2)", "= response.read() except URLError, e: print 'error:', e print data3", "urllib2 import Request, urlopen, URLError import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/')", "= Request (url) try: response = urlopen(request2) data2 = response.read()", "(station_prop) #ID is a key in dictionary so need to", "ID station_prop = data2 station_prop_json= json.loads (station_prop) #ID is a", "key in dictionary so need to extract as a key", "= stations [7] properties = stations2[u'properties'] #extract ID so can", "to get to timecourse ID station_prop = data2 station_prop_json= json.loads", "URLError, e: print 'error:', e #contains station properties data. 
Need", "response.read() except URLError, e: print 'error:', e stations= json.loads (data)", "response = urlopen(request2) data2 = response.read() except URLError, e: print", "response.read() except URLError, e: print 'error:', e #contains station properties", "print 'error:', e stations= json.loads (data) #extract out station 2", "= response.read() except URLError, e: print 'error:', e #contains station", "station_prop_json= json.loads (station_prop) #ID is a key in dictionary so", "link ID = properties[u'id'] #print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2", "= Request(url2) try: response = urlopen(request3) data3 = response.read() except", "response = urlopen(request3) data3 = response.read() except URLError, e: print", "(url) try: response = urlopen(request2) data2 = response.read() except URLError,", "so need to extract as a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0]", "urlopen(request3) data3 = response.read() except URLError, e: print 'error:', e", "extract as a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData')", "i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2) try: response =", "stations2 = stations [7] properties = stations2[u'properties'] #extract ID so", "'error:', e #contains station properties data. 
Need to get to", "need to extract as a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2", "URLError, e: print 'error:', e stations= json.loads (data) #extract out", "data = response.read() except URLError, e: print 'error:', e stations=", "e stations= json.loads (data) #extract out station 2 stations2 =", "#print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url) try:", "=('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData') request3 = Request(url2) try: response = urlopen(request3) data3", "ID = properties[u'id'] #print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 =", "properties[u'id'] #print ID url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID)) request2 = Request (url)", "request3 = Request(url2) try: response = urlopen(request3) data3 = response.read()", "Need to get to timecourse ID station_prop = data2 station_prop_json=", "[7] properties = stations2[u'properties'] #extract ID so can be use", "to extract as a key a= station_prop_json[u'properties'][u'timeseries'].keys() i=a[0] url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i)", "try: response = urlopen(request) data = response.read() except URLError, e:", "'error:', e stations= json.loads (data) #extract out station 2 stations2", "json.loads (data) #extract out station 2 stations2 = stations [7]", "try: response = urlopen(request2) data2 = response.read() except URLError, e:", "get to timecourse ID station_prop = data2 station_prop_json= json.loads (station_prop)", "stations= json.loads (data) #extract out station 2 stations2 = stations", "timecourse ID station_prop = data2 station_prop_json= json.loads (station_prop) #ID is", "request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request) data = response.read()", "Request(url2) try: response = urlopen(request3) data3 = 
response.read() except URLError,", "json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request) data =", "2 stations2 = stations [7] properties = stations2[u'properties'] #extract ID", "e: print 'error:', e #contains station properties data. Need to", "import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response = urlopen(request) data", "stations [7] properties = stations2[u'properties'] #extract ID so can be", "json.loads (station_prop) #ID is a key in dictionary so need", "urlopen(request2) data2 = response.read() except URLError, e: print 'error:', e", "station 2 stations2 = stations [7] properties = stations2[u'properties'] #extract", "Request, urlopen, URLError import json request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/') try: response", "to timecourse ID station_prop = data2 station_prop_json= json.loads (station_prop) #ID", "= urlopen(request) data = response.read() except URLError, e: print 'error:',", "properties data. Need to get to timecourse ID station_prop =", "e: print 'error:', e stations= json.loads (data) #extract out station", "request2 = Request (url) try: response = urlopen(request2) data2 =", "out station 2 stations2 = stations [7] properties = stations2[u'properties']", "a key in dictionary so need to extract as a" ]
[]
[ "import core.models from django.db import migrations, models class Migration(migrations.Migration): dependencies", "migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ),", "= [ ('core', '0001_initial'), ] operations = [ migrations.RenameField( model_name='echo',", "2018-07-02 19:13 import core.models from django.db import migrations, models class", "2.0.6 on 2018-07-02 19:13 import core.models from django.db import migrations,", "core.models from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture', field=models.FileField(blank=True, null=True,", "<reponame>mertyildiran/echo<gh_stars>1-10 # Generated by Django 2.0.6 on 2018-07-02 19:13 import", "model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture', field=models.FileField(blank=True, null=True, upload_to=core.models.profile_directory),", "Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations = [", "Generated by Django 2.0.6 on 2018-07-02 19:13 import core.models from", "= [ migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField( model_name='echo', name='audio',", "operations = [ migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField( model_name='echo',", "by Django 2.0.6 on 2018-07-02 19:13 import core.models from django.db", "dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.RenameField(", "[ migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField( 
model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory),", "), migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture', field=models.FileField(blank=True,", "[ ('core', '0001_initial'), ] operations = [ migrations.RenameField( model_name='echo', old_name='owner',", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core',", "on 2018-07-02 19:13 import core.models from django.db import migrations, models", "new_name='user', ), migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture',", "models class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "] operations = [ migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField(", "name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture', field=models.FileField(blank=True, null=True, upload_to=core.models.profile_directory), ),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'),", "# Generated by Django 2.0.6 on 2018-07-02 19:13 import core.models", "'0001_initial'), ] operations = [ migrations.RenameField( model_name='echo', old_name='owner', new_name='user', ),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ]", "19:13 import core.models from django.db import migrations, models class Migration(migrations.Migration):", "('core', '0001_initial'), ] operations = [ migrations.RenameField( model_name='echo', 
old_name='owner', new_name='user',", "field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile', name='picture', field=models.FileField(blank=True, null=True, upload_to=core.models.profile_directory), ), ]", "class Migration(migrations.Migration): dependencies = [ ('core', '0001_initial'), ] operations =", "old_name='owner', new_name='user', ), migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField( model_name='profile',", "model_name='echo', old_name='owner', new_name='user', ), migrations.AlterField( model_name='echo', name='audio', field=models.FileField(upload_to=core.models.echo_directory), ), migrations.AlterField(", "Django 2.0.6 on 2018-07-02 19:13 import core.models from django.db import" ]
[ "( helpers.parse_mimetype('application/rss+xml') == ('application', 'rss', 'xml', {})) def test_parse_mimetype_8(): assert", "isinstance(A.prop, helpers.reify) assert 'Docstring.' == A.prop.__doc__ def test_reify_assignment(): class A:", "def test_reify_class(): class A: @helpers.reify def prop(self): \"\"\"Docstring.\"\"\" return 1", "test_parse_mimetype_8(): assert ( helpers.parse_mimetype('text/plain;base64') == ('text', 'plain', '', {'base64': ''}))", "test_invalid_formdata_params(): with pytest.raises(TypeError): helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as') #", "= mock.Mock(headers={\"USER-AGENT\": \"Mock/1.0\"}, version=(1, 1)) environ = {\"SPAM\": \"EGGS\"} response", "( helpers.parse_mimetype('''application/json; charset=utf-8;''') == ('application', 'json', '', {'charset': 'utf-8'})) def", "123 def test_requote_uri_with_unquoted_percents(): # Ensure we handle unquoted percent signs", "'' def test_basic_auth4(): auth = helpers.BasicAuth('nkim', 'pwd') assert auth.login ==", "allowed def test_invalid_formdata_content_type(): form = helpers.FormData() invalid_vals = [0, 0.1,", "%P %l %u %r %s %b %O %T %Tf %D'", "%O %T %Tf %D' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger,", "with pytest.raises(ValueError): helpers.BasicAuth('nkim', None) def test_basic_auth3(): auth = helpers.BasicAuth('nkim') assert", "def prop(self): return 1 a = A() assert 1 ==", "pytest.raises(ValueError): helpers.BasicAuth(None) def test_basic_auth2(): with pytest.raises(ValueError): helpers.BasicAuth('nkim', None) def test_basic_auth3():", "auth.password == '' def test_basic_auth4(): auth = helpers.BasicAuth('nkim', 'pwd') assert", "with pytest.raises(TypeError): form.add_field('foo', 'bar', content_type=invalid_val) def test_invalid_formdata_filename(): form = helpers.FormData()", "pytest.raises(TypeError): form.add_field('foo', 'bar', filename=invalid_val) def 
test_invalid_formdata_content_transfer_encoding(): form = helpers.FormData() invalid_vals", "( helpers.parse_mimetype('application/json; charset=utf-8') == ('application', 'json', '', {'charset': 'utf-8'})) def", "= A() assert 1 == a.prop def test_reify_class(): class A:", "helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as') # 2-char str is", "'*', '', {}) def test_parse_mimetype_3(): assert (helpers.parse_mimetype('application/json') == ('application', 'json',", "'%{User-Agent}i %{Content-Length}o %{SPAM}e %{None}i' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger,", "@helpers.reify def prop(self): \"\"\"Docstring.\"\"\" return 1 assert isinstance(A.prop, helpers.reify) assert", "{}, [], b'foo'] for invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo',", "'http://example.com/fiz?buz=%25ppicture' assert quoted == helpers.requote_uri(bad_uri) def test_requote_uri_properly_requotes(): # Ensure requoting", "123}) transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message, environ,", "('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_5(): assert ( helpers.parse_mimetype('''application/json;", "def test_invalid_formdata_content_transfer_encoding(): form = helpers.FormData() invalid_vals = [0, 0.1, {},", "\"Mock/1.0\"}, version=(1, 1)) environ = {\"SPAM\": \"EGGS\"} response = mock.Mock(headers={\"CONTENT-LENGTH\":", "'nkim' assert auth.password == '<PASSWORD>' assert auth.encode() == 'Basic bmtpbTpwd2Q='", "response = mock.Mock(headers={\"CONTENT-LENGTH\": 123}) transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\",", "-' mock_logger.info.assert_called_with(expected) def test_logger_no_message_and_environ(): mock_logger = mock.Mock() mock_transport = mock.Mock()", "A.prop.__doc__ def test_reify_assignment(): class A: @helpers.reify def prop(self): 
return 1", "content_transfer_encoding=invalid_val) def test_access_logger_format(): log_format = '%T {%{SPAM}e} \"%{ETag}o\" %X {X}", "{\"SPAM\": \"EGGS\"} response = mock.Mock(headers={\"CONTENT-LENGTH\": 123}) transport = mock.Mock() transport.get_extra_info.return_value", "mock_logger.info.assert_called_with(expected) def test_logger_no_message_and_environ(): mock_logger = mock.Mock() mock_transport = mock.Mock() mock_transport.get_extra_info.return_value", "test_parse_mimetype_7(): assert ( helpers.parse_mimetype('application/rss+xml') == ('application', 'rss', 'xml', {})) def", "# Ensure we handle unquoted percent signs in redirects. bad_uri", "%t %P %l %u %r %s %b %O %T %Tf", "mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) message = mock.Mock(headers={\"USER-AGENT\": \"Mock/1.0\"}, version=(1,", "transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message, environ, response, transport, 3.1415926) assert", "response, transport, 3.1415926) assert not mock_logger.exception.called expected = ('127.0.0.2 [01/Jan/1843:00:00:00", "= mock.Mock(headers={}, method=\"GET\", path=\"/path\", version=(1, 1)) environ = {} response", "%l %u %r %s %b %O %T %Tf %D' mock_logger", "%Tf %D' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) message", "'pwd') assert auth.login == 'nkim' assert auth.password == '<PASSWORD>' assert", "pytest.raises(TypeError): form.add_field('foo', 'bar', content_transfer_encoding=invalid_val) def test_access_logger_format(): log_format = '%T {%{SPAM}e}", "\"%r %{FOOBAR}e\") access_logger.log(None, None, None, mock_transport, 0.0) mock_logger.info.assert_called_with(\"- -\") def", "assert ( helpers.parse_mimetype('''application/json; charset=utf-8;''') == ('application', 'json', '', {'charset': 'utf-8'}))", "response, transport, 0.0) assert not mock_logger.error.called expected = 'Mock/1.0 123", "requoting doesn't break expectations. 
quoted = 'http://example.com/fiz?buz=%25ppicture' assert quoted ==", "helpers.parse_mimetype('*') == ('*', '*', '', {}) def test_parse_mimetype_3(): assert (helpers.parse_mimetype('application/json')", "= helpers.BasicAuth('nkim') assert auth.login == 'nkim' assert auth.password == ''", "('text', 'plain', '', {'base64': ''})) def test_basic_auth1(): # missing password", "form = helpers.FormData() invalid_vals = [0, 0.1, {}, [], b'foo']", "%r %s %b %O %T %Tf %D' mock_logger = mock.Mock()", "'bar', filename=invalid_val) def test_invalid_formdata_content_transfer_encoding(): form = helpers.FormData() invalid_vals = [0,", "%{FOOBAR}e\") access_logger.log(None, None, None, mock_transport, 0.0) mock_logger.info.assert_called_with(\"- -\") def test_reify():", "%%%s' assert expected == access_logger._log_format @mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def test_access_logger_atoms(mock_getpid, mock_datetime):", "%b %O %T %Tf %D' mock_logger = mock.Mock() access_logger =", "auth.encode() == 'Basic bmtpbTpwd2Q=' def test_invalid_formdata_params(): with pytest.raises(TypeError): helpers.FormData('asdasf') def", "invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', content_transfer_encoding=invalid_val) def test_access_logger_format():", "1, 0, 0) mock_datetime.datetime.utcnow.return_value = utcnow mock_getpid.return_value = 42 log_format", "for invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', content_type=invalid_val) def", "expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - ' 'GET", "1234) access_logger.log(message, environ, response, transport, 0.0) assert not mock_logger.error.called expected", "= '%a %t %P %l %u %r %s %b %O", "access_logger = helpers.AccessLogger(mock_logger, \"%r %{FOOBAR}e\") access_logger.log(None, None, None, mock_transport, 0.0)", "helpers.parse_mimetype('application/json; charset=utf-8') == ('application', 'json', '', {'charset': 
'utf-8'})) def test_parse_mimetype_5():", "test_parse_mimetype_3(): assert (helpers.parse_mimetype('application/json') == ('application', 'json', '', {})) def test_parse_mimetype_4():", "prop(self): return 1 a = A() assert 1 == a.prop", "def test_parse_mimetype_2(): assert helpers.parse_mimetype('*') == ('*', '*', '', {}) def", "environ, response, transport, 0.0) assert not mock_logger.error.called expected = 'Mock/1.0", "0) access_logger = helpers.AccessLogger(mock_logger, \"%r %{FOOBAR}e\") access_logger.log(None, None, None, mock_transport,", "environ = {} response = mock.Mock(headers={}, output_length=123, body_length=42, status=200) transport", "A() with pytest.raises(AttributeError): a.prop = 123 def test_requote_uri_with_unquoted_percents(): # Ensure", "helpers.BasicAuth(None) def test_basic_auth2(): with pytest.raises(ValueError): helpers.BasicAuth('nkim', None) def test_basic_auth3(): auth", "'xml', {})) def test_parse_mimetype_8(): assert ( helpers.parse_mimetype('text/plain;base64') == ('text', 'plain',", "= 'Mock/1.0 123 EGGS -' mock_logger.info.assert_called_with(expected) def test_logger_no_message_and_environ(): mock_logger =", "transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message, environ, response, transport, 0.0) assert", "helpers.parse_mimetype('''application/json; charset=utf-8;''') == ('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_6():", "utcnow = datetime.datetime(1843, 1, 1, 0, 0) mock_datetime.datetime.utcnow.return_value = utcnow", "transport, 3.1415926) assert not mock_logger.exception.called expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000]", "<42> - - ' 'GET /path HTTP/1.1 200 42 123", "log_format = '%T {%{SPAM}e} \"%{ETag}o\" %X {X} %%P' mock_logger =", "pytest.raises(TypeError): helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as') # 2-char str", "unquoted percent signs in redirects. 
bad_uri = 'http://example.com/fiz?buz=%ppicture' quoted =", "expected = '%s {%s} \"%s\" %%X {X} %%%s' assert expected", "(\"127.0.0.3\", 0) access_logger = helpers.AccessLogger(mock_logger, \"%r %{FOOBAR}e\") access_logger.log(None, None, None,", "test_basic_auth1(): # missing password here with pytest.raises(ValueError): helpers.BasicAuth(None) def test_basic_auth2():", "charset=utf-8') == ('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_5(): assert", "== ('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_5(): assert (", "mock.Mock() mock_transport.get_extra_info.return_value = (\"127.0.0.3\", 0) access_logger = helpers.AccessLogger(mock_logger, \"%r %{FOOBAR}e\")", "body_length=42, status=200) transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message,", "= helpers.AccessLogger(mock_logger, \"%r %{FOOBAR}e\") access_logger.log(None, None, None, mock_transport, 0.0) mock_logger.info.assert_called_with(\"-", "'', '', {}) def test_parse_mimetype_2(): assert helpers.parse_mimetype('*') == ('*', '*',", "environ = {\"SPAM\": \"EGGS\"} response = mock.Mock(headers={\"CONTENT-LENGTH\": 123}) transport =", "a.prop def test_reify_class(): class A: @helpers.reify def prop(self): \"\"\"Docstring.\"\"\" return", "0.0) mock_logger.info.assert_called_with(\"- -\") def test_reify(): class A: @helpers.reify def prop(self):", "%{None}i' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) message =", "unittest import mock from aiohttp import helpers import datetime def", "auth = helpers.BasicAuth('nkim') assert auth.login == 'nkim' assert auth.password ==", "with pytest.raises(TypeError): helpers.FormData('as') # 2-char str is not allowed def", "expected == access_logger._log_format @mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def test_access_logger_atoms(mock_getpid, mock_datetime): utcnow =", "' 'GET /path HTTP/1.1 200 
@mock.patch("aiohttp.helpers.datetime")
@mock.patch("os.getpid")
def test_access_logger_atoms(mock_getpid, mock_datetime):
    """Every supported log-format atom renders its expected value."""
    # Pin the wall clock and the PID so the rendered line is deterministic.
    mock_datetime.datetime.utcnow.return_value = datetime.datetime(
        1843, 1, 1, 0, 0)
    mock_getpid.return_value = 42
    logger = mock.Mock()
    access_logger = helpers.AccessLogger(
        logger, '%a %t %P %l %u %r %s %b %O %T %Tf %D')
    request = mock.Mock(headers={}, method="GET", path="/path",
                        version=(1, 1))
    response = mock.Mock(headers={}, output_length=123,
                         body_length=42, status=200)
    transport = mock.Mock()
    transport.get_extra_info.return_value = ("127.0.0.2", 1234)
    access_logger.log(request, {}, response, transport, 3.1415926)
    assert not logger.exception.called
    logger.info.assert_called_with(
        '127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - '
        'GET /path HTTP/1.1 200 42 123 3 3.141593 3141593')
def test_parse_mimetype_5():
    """A trailing ';' after the parameter list is tolerated."""
    parsed = helpers.parse_mimetype('application/json; charset=utf-8;')
    assert parsed == ('application', 'json', '', {'charset': 'utf-8'})
def test_requote_uri_properly_requotes():
    """An already-quoted URI passes through requote_uri unchanged."""
    uri = 'http://example.com/fiz?buz=%25ppicture'
    assert helpers.requote_uri(uri) == uri
def test_logger_no_message_and_environ():
    """With no message and no environ, atoms degrade to '-' placeholders."""
    logger = mock.Mock()
    transport = mock.Mock()
    transport.get_extra_info.return_value = ("127.0.0.3", 0)
    access_logger = helpers.AccessLogger(logger, "%r %{FOOBAR}e")
    access_logger.log(None, None, None, transport, 0.0)
    logger.info.assert_called_with("- -")
def test_reify():
    """reify exposes the wrapped method's return value as an attribute."""
    class A:
        @helpers.reify
        def prop(self):
            return 1

    assert A().prop == 1
mock.Mock(headers={\"CONTENT-LENGTH\": 123}) transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234)", "'%T {%{SPAM}e} \"%{ETag}o\" %X {X} %%P' mock_logger = mock.Mock() access_logger", "{'charset': 'utf-8'})) def test_parse_mimetype_6(): assert( helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT=\"UTF-8\"') == ('application', 'json', '',", "bmtpbTpwd2Q=' def test_invalid_formdata_params(): with pytest.raises(TypeError): helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError):", "= mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message, environ, response, transport,", "(\"127.0.0.2\", 1234) access_logger.log(message, environ, response, transport, 0.0) assert not mock_logger.error.called", "def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as') # 2-char str is not", "def test_logger_no_message_and_environ(): mock_logger = mock.Mock() mock_transport = mock.Mock() mock_transport.get_extra_info.return_value =", "test_requote_uri_with_unquoted_percents(): # Ensure we handle unquoted percent signs in redirects.", "mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) expected = '%s {%s} \"%s\"", "utcnow mock_getpid.return_value = 42 log_format = '%a %t %P %l", "auth.password == '<PASSWORD>' assert auth.encode() == 'Basic bmtpbTpwd2Q=' def test_invalid_formdata_params():", "%%X {X} %%%s' assert expected == access_logger._log_format @mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def", "= [0, 0.1, {}, [], b'foo'] for invalid_val in invalid_vals:", "def test_access_logger_format(): log_format = '%T {%{SPAM}e} \"%{ETag}o\" %X {X} %%P'", "A: @helpers.reify def prop(self): return 1 a = A() assert", "'rss', 'xml', {})) def test_parse_mimetype_8(): assert ( helpers.parse_mimetype('text/plain;base64') == ('text',", "assert (helpers.parse_mimetype('application/json') == ('application', 'json', 
def test_parse_mimetype_8():
    """A bare parameter without '=' is kept with an empty string value."""
    parsed = helpers.parse_mimetype('text/plain;base64')
    assert parsed == ('text', 'plain', '', {'base64': ''})
def test_basic_auth1():
    """A None login is rejected with ValueError."""
    with pytest.raises(ValueError):
        helpers.BasicAuth(None)
def test_invalid_formdata_content_type():
    """content_type must be a str; every non-str value raises TypeError."""
    form = helpers.FormData()
    for bad in (0, 0.1, {}, [], b'foo'):
        with pytest.raises(TypeError):
            form.add_field('foo', 'bar', content_type=bad)
def test_parse_mimetype_4():
    """A single 'key=value' parameter lands in the params dict."""
    parsed = helpers.parse_mimetype('application/json; charset=utf-8')
    assert parsed == ('application', 'json', '', {'charset': 'utf-8'})
def test_invalid_formdata_filename():
    """filename must be a str; every non-str value raises TypeError."""
    form = helpers.FormData()
    for bad in (0, 0.1, {}, [], b'foo'):
        with pytest.raises(TypeError):
            form.add_field('foo', 'bar', filename=bad)
def test_invalid_formdata_content_transfer_encoding():
    """content_transfer_encoding must be a str; non-str raises TypeError."""
    form = helpers.FormData()
    for bad in (0, 0.1, {}, [], b'foo'):
        with pytest.raises(TypeError):
            form.add_field('foo', 'bar', content_transfer_encoding=bad)
def test_reify_assignment():
    """A reify-backed attribute rejects direct assignment."""
    class A:
        @helpers.reify
        def prop(self):
            return 1

    with pytest.raises(AttributeError):
        A().prop = 123
@mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def test_access_logger_atoms(mock_getpid, mock_datetime): utcnow = datetime.datetime(1843, 1,", "str is not allowed def test_invalid_formdata_content_type(): form = helpers.FormData() invalid_vals", "= mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) message = mock.Mock(headers={\"USER-AGENT\": \"Mock/1.0\"},", "assert auth.login == 'nkim' assert auth.password == '<PASSWORD>' assert auth.encode()", "invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', content_type=invalid_val) def test_invalid_formdata_filename(): form =", "version=(1, 1)) environ = {} response = mock.Mock(headers={}, output_length=123, body_length=42,", "helpers.parse_mimetype('text/plain;base64') == ('text', 'plain', '', {'base64': ''})) def test_basic_auth1(): #", "def test_invalid_formdata_params(): with pytest.raises(TypeError): helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as')", "test_parse_mimetype_5(): assert ( helpers.parse_mimetype('''application/json; charset=utf-8;''') == ('application', 'json', '', {'charset':", "1, 1, 0, 0) mock_datetime.datetime.utcnow.return_value = utcnow mock_getpid.return_value = 42", "= mock.Mock(headers={}, output_length=123, body_length=42, status=200) transport = mock.Mock() transport.get_extra_info.return_value =", "== ('', '', '', {}) def test_parse_mimetype_2(): assert helpers.parse_mimetype('*') ==", "log_format) expected = '%s {%s} \"%s\" %%X {X} %%%s' assert", "def test_parse_mimetype_3(): assert (helpers.parse_mimetype('application/json') == ('application', 'json', '', {})) def", "== ('application', 'json', '', {'charset': 'UTF-8'})) def test_parse_mimetype_7(): assert (", "assert auth.encode() == 'Basic bmtpbTpwd2Q=' def test_invalid_formdata_params(): with pytest.raises(TypeError): helpers.FormData('asdasf')", "'nkim' assert auth.password == '' def test_basic_auth4(): 
auth = helpers.BasicAuth('nkim',", "aiohttp import helpers import datetime def test_parse_mimetype_1(): assert helpers.parse_mimetype('') ==", "mock_transport, 0.0) mock_logger.info.assert_called_with(\"- -\") def test_reify(): class A: @helpers.reify def", "content_type=invalid_val) def test_invalid_formdata_filename(): form = helpers.FormData() invalid_vals = [0, 0.1,", "[01/Jan/1843:00:00:00 +0000] <42> - - ' 'GET /path HTTP/1.1 200", "= 'http://example.com/fiz?buz=%25ppicture' assert quoted == helpers.requote_uri(bad_uri) def test_requote_uri_properly_requotes(): # Ensure", "helpers.requote_uri(bad_uri) def test_requote_uri_properly_requotes(): # Ensure requoting doesn't break expectations. quoted", "'%a %t %P %l %u %r %s %b %O %T", "def test_access_logger_atoms(mock_getpid, mock_datetime): utcnow = datetime.datetime(1843, 1, 1, 0, 0)", "123 EGGS -' mock_logger.info.assert_called_with(expected) def test_logger_no_message_and_environ(): mock_logger = mock.Mock() mock_transport", "( helpers.parse_mimetype('text/plain;base64') == ('text', 'plain', '', {'base64': ''})) def test_basic_auth1():", "path=\"/path\", version=(1, 1)) environ = {} response = mock.Mock(headers={}, output_length=123,", "assert auth.password == '' def test_basic_auth4(): auth = helpers.BasicAuth('nkim', 'pwd')", "= helpers.AccessLogger(mock_logger, log_format) expected = '%s {%s} \"%s\" %%X {X}", "assert not mock_logger.error.called expected = 'Mock/1.0 123 EGGS -' mock_logger.info.assert_called_with(expected)", "def test_parse_mimetype_4(): assert ( helpers.parse_mimetype('application/json; charset=utf-8') == ('application', 'json', '',", "('', '', '', {}) def test_parse_mimetype_2(): assert helpers.parse_mimetype('*') == ('*',", "bad_uri = 'http://example.com/fiz?buz=%ppicture' quoted = 'http://example.com/fiz?buz=%25ppicture' assert quoted == helpers.requote_uri(bad_uri)", "quoted == helpers.requote_uri(bad_uri) def test_requote_uri_properly_requotes(): # Ensure requoting doesn't break", 
"def test_parse_mimetype_7(): assert ( helpers.parse_mimetype('application/rss+xml') == ('application', 'rss', 'xml', {}))", "test_parse_mimetype_2(): assert helpers.parse_mimetype('*') == ('*', '*', '', {}) def test_parse_mimetype_3():", "test_logger_no_message_and_environ(): mock_logger = mock.Mock() mock_transport = mock.Mock() mock_transport.get_extra_info.return_value = (\"127.0.0.3\",", "/path HTTP/1.1 200 42 123 3 3.141593 3141593') mock_logger.info.assert_called_with(expected) def", "mock from aiohttp import helpers import datetime def test_parse_mimetype_1(): assert", "-\") def test_reify(): class A: @helpers.reify def prop(self): return 1", "output_length=123, body_length=42, status=200) transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234)", "= 42 log_format = '%a %t %P %l %u %r", "helpers.reify) assert 'Docstring.' == A.prop.__doc__ def test_reify_assignment(): class A: @helpers.reify", "2-char str is not allowed def test_invalid_formdata_content_type(): form = helpers.FormData()", "signs in redirects. bad_uri = 'http://example.com/fiz?buz=%ppicture' quoted = 'http://example.com/fiz?buz=%25ppicture' assert", "= datetime.datetime(1843, 1, 1, 0, 0) mock_datetime.datetime.utcnow.return_value = utcnow mock_getpid.return_value", "redirects. bad_uri = 'http://example.com/fiz?buz=%ppicture' quoted = 'http://example.com/fiz?buz=%25ppicture' assert quoted ==", "return 1 a = A() assert 1 == a.prop def", "'Docstring.' 
== A.prop.__doc__ def test_reify_assignment(): class A: @helpers.reify def prop(self):", "0.0) assert not mock_logger.error.called expected = 'Mock/1.0 123 EGGS -'", "mock_logger = mock.Mock() mock_transport = mock.Mock() mock_transport.get_extra_info.return_value = (\"127.0.0.3\", 0)", "test_basic_auth2(): with pytest.raises(ValueError): helpers.BasicAuth('nkim', None) def test_basic_auth3(): auth = helpers.BasicAuth('nkim')", "filename=invalid_val) def test_invalid_formdata_content_transfer_encoding(): form = helpers.FormData() invalid_vals = [0, 0.1,", "helpers.parse_mimetype('') == ('', '', '', {}) def test_parse_mimetype_2(): assert helpers.parse_mimetype('*')", "%X {X} %%P' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format)", "mock_logger.exception.called expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - '", "import pytest from unittest import mock from aiohttp import helpers", "charset=utf-8;''') == ('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_6(): assert(", "%%P' mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) expected =", "transport, 0.0) assert not mock_logger.error.called expected = 'Mock/1.0 123 EGGS", "def prop(self): \"\"\"Docstring.\"\"\" return 1 assert isinstance(A.prop, helpers.reify) assert 'Docstring.'", "from aiohttp import helpers import datetime def test_parse_mimetype_1(): assert helpers.parse_mimetype('')", "import datetime def test_parse_mimetype_1(): assert helpers.parse_mimetype('') == ('', '', '',", "assert 1 == a.prop def test_reify_class(): class A: @helpers.reify def", "we handle unquoted percent signs in redirects. 
bad_uri = 'http://example.com/fiz?buz=%ppicture'", "== 'nkim' assert auth.password == '' def test_basic_auth4(): auth =", "with pytest.raises(TypeError): helpers.FormData('asdasf') def test_invalid_formdata_params2(): with pytest.raises(TypeError): helpers.FormData('as') # 2-char", "== helpers.requote_uri(bad_uri) def test_requote_uri_properly_requotes(): # Ensure requoting doesn't break expectations.", "assert expected == access_logger._log_format @mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def test_access_logger_atoms(mock_getpid, mock_datetime): utcnow", "transport = mock.Mock() transport.get_extra_info.return_value = (\"127.0.0.2\", 1234) access_logger.log(message, environ, response,", "def test_reify(): class A: @helpers.reify def prop(self): return 1 a", "access_logger = helpers.AccessLogger(mock_logger, log_format) expected = '%s {%s} \"%s\" %%X", "import mock from aiohttp import helpers import datetime def test_parse_mimetype_1():", "@helpers.reify def prop(self): return 1 a = A() with pytest.raises(AttributeError):", "3.141593 3141593') mock_logger.info.assert_called_with(expected) def test_access_logger_dicts(): log_format = '%{User-Agent}i %{Content-Length}o %{SPAM}e", "def test_parse_mimetype_5(): assert ( helpers.parse_mimetype('''application/json; charset=utf-8;''') == ('application', 'json', '',", "{%{SPAM}e} \"%{ETag}o\" %X {X} %%P' mock_logger = mock.Mock() access_logger =", "@mock.patch(\"aiohttp.helpers.datetime\") @mock.patch(\"os.getpid\") def test_access_logger_atoms(mock_getpid, mock_datetime): utcnow = datetime.datetime(1843, 1, 1,", "assert( helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT=\"UTF-8\"') == ('application', 'json', '', {'charset': 'UTF-8'})) def test_parse_mimetype_7():", "test_requote_uri_properly_requotes(): # Ensure requoting doesn't break expectations. 
quoted = 'http://example.com/fiz?buz=%25ppicture'", "HTTP/1.1 200 42 123 3 3.141593 3141593') mock_logger.info.assert_called_with(expected) def test_access_logger_dicts():", "A: @helpers.reify def prop(self): \"\"\"Docstring.\"\"\" return 1 assert isinstance(A.prop, helpers.reify)", "Ensure we handle unquoted percent signs in redirects. bad_uri =", "+0000] <42> - - ' 'GET /path HTTP/1.1 200 42", "b'foo'] for invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', filename=invalid_val)", "with pytest.raises(AttributeError): a.prop = 123 def test_requote_uri_with_unquoted_percents(): # Ensure we", "('application', 'rss', 'xml', {})) def test_parse_mimetype_8(): assert ( helpers.parse_mimetype('text/plain;base64') ==", "invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', filename=invalid_val) def test_invalid_formdata_content_transfer_encoding():", "from unittest import mock from aiohttp import helpers import datetime", "def prop(self): return 1 a = A() with pytest.raises(AttributeError): a.prop", "('application', 'json', '', {'charset': 'utf-8'})) def test_parse_mimetype_6(): assert( helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT=\"UTF-8\"') ==", "return 1 assert isinstance(A.prop, helpers.reify) assert 'Docstring.' == A.prop.__doc__ def", "mock_logger = mock.Mock() access_logger = helpers.AccessLogger(mock_logger, log_format) expected = '%s", "helpers.AccessLogger(mock_logger, log_format) message = mock.Mock(headers={\"USER-AGENT\": \"Mock/1.0\"}, version=(1, 1)) environ =", "{})) def test_parse_mimetype_4(): assert ( helpers.parse_mimetype('application/json; charset=utf-8') == ('application', 'json',", "mock_logger.error.called expected = 'Mock/1.0 123 EGGS -' mock_logger.info.assert_called_with(expected) def test_logger_no_message_and_environ():", "1 assert isinstance(A.prop, helpers.reify) assert 'Docstring.' 
== A.prop.__doc__ def test_reify_assignment():", "def test_basic_auth1(): # missing password here with pytest.raises(ValueError): helpers.BasicAuth(None) def", "for invalid_val in invalid_vals: with pytest.raises(TypeError): form.add_field('foo', 'bar', filename=invalid_val) def", "method=\"GET\", path=\"/path\", version=(1, 1)) environ = {} response = mock.Mock(headers={},", "{'charset': 'UTF-8'})) def test_parse_mimetype_7(): assert ( helpers.parse_mimetype('application/rss+xml') == ('application', 'rss',", "helpers.FormData('as') # 2-char str is not allowed def test_invalid_formdata_content_type(): form" ]
[ "\"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #:", "\"/lhome/trulsas/openwhisk\" #: Location of output data DATA_DIR = join(FAAS_ROOT, \"..\",", "Location of output data DATA_DIR = join(FAAS_ROOT, \"..\", \"profiler_results\") SYSTEM_CPU_SET", "of output data DATA_DIR = join(FAAS_ROOT, \"..\", \"profiler_results\") SYSTEM_CPU_SET =", "os.path import join FAAS_ROOT=\"/lhome/trulsas/faas-profiler\" WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH =", "\"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location", "OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location of output data DATA_DIR =", "= \"/lhome/trulsas/openwhisk\" #: Location of output data DATA_DIR = join(FAAS_ROOT,", "FAAS_ROOT=\"/lhome/trulsas/faas-profiler\" WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH =", "WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\"", "WSK_PATH = \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location of output", "output data DATA_DIR = join(FAAS_ROOT, \"..\", \"profiler_results\") SYSTEM_CPU_SET = \"0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30\"", "import join FAAS_ROOT=\"/lhome/trulsas/faas-profiler\" WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\"", "\"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location of output data DATA_DIR", "join FAAS_ROOT=\"/lhome/trulsas/faas-profiler\" WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH", 
"#FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH = \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location of", "from os.path import join FAAS_ROOT=\"/lhome/trulsas/faas-profiler\" WORKLOAD_SPECS=join(FAAS_ROOT, \"specs\", \"workloads\") #FAAS_ROOT=\"/home/truls/uni/phd/faas-profiler\" WSK_PATH", "= \"wsk\" OPENWHISK_PATH = \"/lhome/trulsas/openwhisk\" #: Location of output data", "#: Location of output data DATA_DIR = join(FAAS_ROOT, \"..\", \"profiler_results\")" ]
[ "c = a+b return c except : print \"Error in", "divide(a,b): try: c = a/b return c except : print", "c except : print \"Error in divide function\" print divide(10,0)", "= a+b return c except : print \"Error in sum1", "def divide(a,b): try: c = a/b return c except :", "a/b return c except : print \"Error in divide function\"", "except : print \"Error in divide function\" print divide(10,0) print", "except : print \"Error in sum1 function\" def divide(a,b): try:", "print \"Error in sum1 function\" def divide(a,b): try: c =", "function\" def divide(a,b): try: c = a/b return c except", ": print \"Error in divide function\" print divide(10,0) print sum1(10,0)", "try: c = a+b return c except : print \"Error", "= a/b return c except : print \"Error in divide", "return c except : print \"Error in divide function\" print", "return c except : print \"Error in sum1 function\" def", "sum1(a,b): try: c = a+b return c except : print", "c = a/b return c except : print \"Error in", "c except : print \"Error in sum1 function\" def divide(a,b):", "in sum1 function\" def divide(a,b): try: c = a/b return", ": print \"Error in sum1 function\" def divide(a,b): try: c", "a+b return c except : print \"Error in sum1 function\"", "try: c = a/b return c except : print \"Error", "def sum1(a,b): try: c = a+b return c except :", "\"Error in sum1 function\" def divide(a,b): try: c = a/b", "sum1 function\" def divide(a,b): try: c = a/b return c" ]
[ ") def printable_fingerprint(k): '''Convert key fingerprint into OpenSSH printable format'''", "accept_and_add = False if self.mode == verify_mode.prompt: print('Unverified connection to", "LexisNexis Risk Data Management Inc. # # This file is", "[ord(x) for x in fingerprint] return ':'.join(['%02x' % x for", "lookup_name = hostname[:-3] else: host_base, port_base = hostname.rsplit(':', 1) lookup_name", "fingerprint] else: seq = [ord(x) for x in fingerprint] return", "Management Inc. # # This file is part of the", "don't care. ignore=100, # Turn host key verification OFF overwrite_blindly=666", "order of security/paranoia reject=0, # Missing keys are rejected prompt=1,", "class''' def __init__(self, **kwargs): self._fwd = kwargs self._reverse = {}", "Module''' import os import threading import warnings import paramiko.hostkeys #", "os import threading import warnings import paramiko.hostkeys # Deprecated as", "RadSSH software package. # # RadSSH is free software, released", "= True if accept_and_add: print('Accepting new host key for %s'", "key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else: # Missing key if", "return ':'.join(['%02x' % x for x in seq]) class HostKeyVerifier(object):", "# Missing keys automatically accepted # After this point, key", "'''Class to control how (if) host keys are verified''' def", "for k, v in kwargs.items(): self.__setattr__(k, v) self._reverse[v] = k", "hostname.rsplit(':', 1) lookup_name = '[%s]:%s' % (host_base, port_base) # Try", "actual) if self.mode == verify_mode.overwrite_blindly: print('Blindly accepting updated host key", "# intercepted traffic for SSH sessions, and you don't care.", "code to use radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap class''' def", "want to accept this key? 
(y/N): ') if answer[0].upper() ==", "code''' return self._reverse[code] verify_mode = CodeMap( # Different options for", "= hostname elif hostname.endswith(':22'): lookup_name = hostname[:-3] else: host_base, port_base", "key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else: # Missing key", "2016, 2018, 2020 LexisNexis Risk Data Management Inc. # #", "elif hostname.endswith(':22'): lookup_name = hostname[:-3] else: host_base, port_base = hostname.rsplit(':',", "Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management", "if self.mode == verify_mode.prompt: print('Unverified connection to \"%s\"' % lookup_name)", "BSD License. # You are permitted to use, modify, and", "you don't care. ignore=100, # Turn host key verification OFF", "# You are permitted to use, modify, and redsitribute this", "(%s)' % lookup_name) print('Expected:', expected) print('Got :', actual) if self.mode", "This file is part of the RadSSH software package. #", "key verification OFF overwrite_blindly=666 # Concentrated evil ) def printable_fingerprint(k):", "# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data", "known_hosts rewrite instead if using this API warnings.warn(FutureWarning('RadSSH hostkey module", "self._reverse[code] verify_mode = CodeMap( # Different options for handling host", "OFF overwrite_blindly=666 # Concentrated evil ) def printable_fingerprint(k): '''Convert key", "class HostKeyVerifier(object): '''Class to control how (if) host keys are", "# '''HostKey Handling Module''' import os import threading import warnings", "SSH sessions, and you don't care. 
ignore=100, # Turn host", "accept_and_add = True if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add =", "if host_entry and key.get_name() in host_entry: # Entry mismatch expected", "keys automatically accepted # After this point, key conflicts no", "how (if) host keys are verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'):", "= [ord(x) for x in fingerprint] return ':'.join(['%02x' % x", "= k.get_fingerprint() # Handle Python3 bytes or Python2 8-bit string", "Missing keys may be accepted, based on user prompt accept_new=2,", "Missing keys are rejected prompt=1, # Missing keys may be", "print('(Host Key Fingerprint [%s])' % actual) answer = input('Do you", "is part of the RadSSH software package. # # RadSSH", "paramiko.hostkeys # Deprecated as of 1.1 - Use known_hosts rewrite", "become vulnerable to spoofing and # intercepted traffic for SSH", "# Turn host key verification OFF overwrite_blindly=666 # Concentrated evil", "in seq]) class HostKeyVerifier(object): '''Class to control how (if) host", "spoofing and # intercepted traffic for SSH sessions, and you", "# according to the Revised BSD License, a copy of", "# RadSSH is free software, released under the Revised BSD", "__init__(self, **kwargs): self._fwd = kwargs self._reverse = {} for k,", "hostname.endswith(':22'): lookup_name = hostname[:-3] else: host_base, port_base = hostname.rsplit(':', 1)", "lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else: #", "released under the Revised BSD License. 
# You are permitted", "key verification # Listed in decreasing order of security/paranoia reject=0,", "# This file is part of the RadSSH software package.", "(y/N): ') if answer[0].upper() == 'Y': accept_and_add = True if", "overwrite_blindly=666 # Concentrated evil ) def printable_fingerprint(k): '''Convert key fingerprint", "else: host_base, port_base = hostname.rsplit(':', 1) lookup_name = '[%s]:%s' %", "host_base, port_base = hostname.rsplit(':', 1) lookup_name = '[%s]:%s' % (host_base,", "rewrite instead if using this API warnings.warn(FutureWarning('RadSSH hostkey module is", "threading import warnings import paramiko.hostkeys # Deprecated as of 1.1", "format''' fingerprint = k.get_fingerprint() # Handle Python3 bytes or Python2", "prompt=1, # Missing keys may be accepted, based on user", "% actual) answer = input('Do you want to accept this", "Data Management Inc. # # This file is part of", "# Try remainder of host verification with locking self.lock.acquire() if", "code''' return self._fwd[name] def name(self, code): '''Given a code value,", "self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if host_entry and key.get_name() in host_entry:", "mode == verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file)", "no longer hinder connections # Using these options, you become", "Risk Data Management Inc. # # This file is part", "return the code''' return self._fwd[name] def name(self, code): '''Given a", "After this point, key conflicts no longer hinder connections #", "to control how (if) host keys are verified''' def __init__(self,", "care. 
ignore=100, # Turn host key verification OFF overwrite_blindly=666 #", "corresponding code''' return self._reverse[code] verify_mode = CodeMap( # Different options", "v in kwargs.items(): self.__setattr__(k, v) self._reverse[v] = k def code(self,", "x in seq]) class HostKeyVerifier(object): '''Class to control how (if)", "== verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif", "verify_mode.overwrite_blindly): accept_and_add = True if accept_and_add: print('Accepting new host key", "%s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True", "which should be # included with the distribution as file", "self.__setattr__(k, v) self._reverse[v] = k def code(self, name): '''Given a", "= CodeMap( # Different options for handling host key verification", "may be accepted, based on user prompt accept_new=2, # Missing", "key? (y/N): ') if answer[0].upper() == 'Y': accept_and_add = True", "class CodeMap(object): '''CodeMap class''' def __init__(self, **kwargs): self._fwd = kwargs", "a hostname or IP''' if self.mode == verify_mode.ignore: return True", "ports... if ':' not in hostname: lookup_name = hostname elif", "release 2.0. 
Port existing code to use radssh.known_hosts instead.')) class", "the code''' return self._fwd[name] def name(self, code): '''Given a code", "True if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True if", "fingerprint] return ':'.join(['%02x' % x for x in seq]) class", "is no longer supported, and will be removed in release", "if ':' not in hostname: lookup_name = hostname elif hostname.endswith(':22'):", "port_base = hostname.rsplit(':', 1) lookup_name = '[%s]:%s' % (host_base, port_base)", "and # intercepted traffic for SSH sessions, and you don't", "import paramiko.hostkeys # Deprecated as of 1.1 - Use known_hosts", "Port existing code to use radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap", "== verify_mode.ignore: return True # Special formatting for non-standard ports...", "package. # # RadSSH is free software, released under the", "'''Given a name, return the code''' return self._fwd[name] def name(self,", "= k def code(self, name): '''Given a name, return the", "options, you become vulnerable to spoofing and # intercepted traffic", "host verification with locking self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release() return", "k.get_fingerprint() # Handle Python3 bytes or Python2 8-bit string style...", "{} for k, v in kwargs.items(): self.__setattr__(k, v) self._reverse[v] =", "existing code to use radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap class'''", "as file LICENSE.txt # '''HostKey Handling Module''' import os import", "% x for x in seq]) class HostKeyVerifier(object): '''Class to", "% lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True self.lock.release()", "instead.')) class CodeMap(object): '''CodeMap class''' def __init__(self, **kwargs): self._fwd =", "expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for (%s)' % 
lookup_name)", "connection to \"%s\"' % lookup_name) print('(Host Key Fingerprint [%s])' %", "seq = [int(x) for x in fingerprint] else: seq =", "accepting updated host key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(),", "if mode == verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file):", "not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key): '''Verify a single", "Special formatting for non-standard ports... if ':' not in hostname:", "Handle Python3 bytes or Python2 8-bit string style... if isinstance(fingerprint[0],", "lookup_name = hostname elif hostname.endswith(':22'): lookup_name = hostname[:-3] else: host_base,", "mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for (%s)' %", "host_entry and key.get_name() in host_entry: # Entry mismatch expected =", "kwargs self._reverse = {} for k, v in kwargs.items(): self.__setattr__(k,", "new host key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key)", "self.mode == verify_mode.ignore: return True # Special formatting for non-standard", "name, return the code''' return self._fwd[name] def name(self, code): '''Given", "software, released under the Revised BSD License. # You are", "paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if mode == verify_mode.ignore: return self.known_hosts_file", "to \"%s\"' % lookup_name) print('(Host Key Fingerprint [%s])' % actual)", "the distribution as file LICENSE.txt # '''HostKey Handling Module''' import", "software package. 
# # RadSSH is free software, released under", "handling host key verification # Listed in decreasing order of", "for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return", "seq = [ord(x) for x in fingerprint] return ':'.join(['%02x' %", "os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key): '''Verify a single hostkey against", "into OpenSSH printable format''' fingerprint = k.get_fingerprint() # Handle Python3", "longer hinder connections # Using these options, you become vulnerable", "the RadSSH software package. # # RadSSH is free software,", "under the Revised BSD License. # You are permitted to", "Python3 bytes or Python2 8-bit string style... if isinstance(fingerprint[0], int):", "host_entry = self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if host_entry and key.get_name()", "value, return the corresponding code''' return self._reverse[code] verify_mode = CodeMap(", "automatically accepted # After this point, key conflicts no longer", "# Missing key if self.mode == verify_mode.reject: self.lock.release() return False", "accept_new=2, # Missing keys automatically accepted # After this point,", "= input('Do you want to accept this key? (y/N): ')", "print('Expected:', expected) print('Got :', actual) if self.mode == verify_mode.overwrite_blindly: print('Blindly", "the corresponding code''' return self._reverse[code] verify_mode = CodeMap( # Different", "in release 2.0. 
Port existing code to use radssh.known_hosts instead.'))", "= '[%s]:%s' % (host_base, port_base) # Try remainder of host", "hinder connections # Using these options, you become vulnerable to", "False if self.mode == verify_mode.prompt: print('Unverified connection to \"%s\"' %", "verify_host_key(self, hostname, key): '''Verify a single hostkey against a hostname", "Concentrated evil ) def printable_fingerprint(k): '''Convert key fingerprint into OpenSSH", "in kwargs.items(): self.__setattr__(k, v) self._reverse[v] = k def code(self, name):", "self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else: # Missing", "<gh_stars>10-100 # # Copyright (c) 2014, 2016, 2018, 2020 LexisNexis", "single hostkey against a hostname or IP''' if self.mode ==", "kwargs.items(): self.__setattr__(k, v) self._reverse[v] = k def code(self, name): '''Given", "def __init__(self, **kwargs): self._fwd = kwargs self._reverse = {} for", "# Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for", "name): '''Given a name, return the code''' return self._fwd[name] def", "to the Revised BSD License, a copy of which should", "and will be removed in release 2.0. Port existing code", "k def code(self, name): '''Given a name, return the code'''", "control how (if) host keys are verified''' def __init__(self, mode='reject',", "key.get_name() in host_entry: # Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host", "accepted, based on user prompt accept_new=2, # Missing keys automatically", "Python2 8-bit string style... 
if isinstance(fingerprint[0], int): seq = [int(x)", "# Concentrated evil ) def printable_fingerprint(k): '''Convert key fingerprint into", "seq]) class HostKeyVerifier(object): '''Class to control how (if) host keys", "instead if using this API warnings.warn(FutureWarning('RadSSH hostkey module is no", "if accept_and_add: print('Accepting new host key for %s' % lookup_name)", "2018, 2020 LexisNexis Risk Data Management Inc. # # This", "= [int(x) for x in fingerprint] else: seq = [ord(x)", "in hostname: lookup_name = hostname elif hostname.endswith(':22'): lookup_name = hostname[:-3]", "import os import threading import warnings import paramiko.hostkeys # Deprecated", "be accepted, based on user prompt accept_new=2, # Missing keys", "this key? (y/N): ') if answer[0].upper() == 'Y': accept_and_add =", "updated host key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key)", "(c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.", "print('Unverified connection to \"%s\"' % lookup_name) print('(Host Key Fingerprint [%s])'", "code(self, name): '''Given a name, return the code''' return self._fwd[name]", "Revised BSD License. # You are permitted to use, modify,", "os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key):", "host key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file)", "print('Host key mismatch for (%s)' % lookup_name) print('Expected:', expected) print('Got", "License. 
# You are permitted to use, modify, and redsitribute", "you become vulnerable to spoofing and # intercepted traffic for", "remainder of host verification with locking self.lock.acquire() if self.hostkeys.check(lookup_name, key):", "# Missing keys may be accepted, based on user prompt", "You are permitted to use, modify, and redsitribute this software", "import warnings import paramiko.hostkeys # Deprecated as of 1.1 -", "or Python2 8-bit string style... if isinstance(fingerprint[0], int): seq =", "% (host_base, port_base) # Try remainder of host verification with", "of security/paranoia reject=0, # Missing keys are rejected prompt=1, #", "in fingerprint] return ':'.join(['%02x' % x for x in seq])", "(verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True if accept_and_add: print('Accepting new host", "def printable_fingerprint(k): '''Convert key fingerprint into OpenSSH printable format''' fingerprint", "key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release()", "= printable_fingerprint(key) if host_entry and key.get_name() in host_entry: # Entry", "user prompt accept_new=2, # Missing keys automatically accepted # After", "Fingerprint [%s])' % actual) answer = input('Do you want to", "module is no longer supported, and will be removed in", "known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock = threading.Lock()", "Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for (%s)'", "def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys()", "if answer[0].upper() == 'Y': accept_and_add = True if self.mode in", "to use radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap class''' def __init__(self,", "self.known_hosts_file = 
os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file))", "self.lock = threading.Lock() if mode == verify_mode.ignore: return self.known_hosts_file =", "rejected prompt=1, # Missing keys may be accepted, based on", "based on user prompt accept_new=2, # Missing keys automatically accepted", "be # included with the distribution as file LICENSE.txt #", "\"%s\"' % lookup_name) print('(Host Key Fingerprint [%s])' % actual) answer", "print('Got :', actual) if self.mode == verify_mode.overwrite_blindly: print('Blindly accepting updated", "to accept this key? (y/N): ') if answer[0].upper() == 'Y':", "code value, return the corresponding code''' return self._reverse[code] verify_mode =", "= hostname.rsplit(':', 1) lookup_name = '[%s]:%s' % (host_base, port_base) #", "mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock =", "ignore=100, # Turn host key verification OFF overwrite_blindly=666 # Concentrated", "accept this key? (y/N): ') if answer[0].upper() == 'Y': accept_and_add", "and redsitribute this software # according to the Revised BSD", "style... if isinstance(fingerprint[0], int): seq = [int(x) for x in", "# included with the distribution as file LICENSE.txt # '''HostKey", "# Using these options, you become vulnerable to spoofing and", "printable format''' fingerprint = k.get_fingerprint() # Handle Python3 bytes or", "os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key): '''Verify a single hostkey", "- Use known_hosts rewrite instead if using this API warnings.warn(FutureWarning('RadSSH", "lookup_name = '[%s]:%s' % (host_base, port_base) # Try remainder of", "and you don't care. 
ignore=100, # Turn host key verification", "verification OFF overwrite_blindly=666 # Concentrated evil ) def printable_fingerprint(k): '''Convert", "lookup_name) print('Expected:', expected) print('Got :', actual) if self.mode == verify_mode.overwrite_blindly:", "on user prompt accept_new=2, # Missing keys automatically accepted #", "Different options for handling host key verification # Listed in", "Try remainder of host verification with locking self.lock.acquire() if self.hostkeys.check(lookup_name,", "host keys are verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode =", "1) lookup_name = '[%s]:%s' % (host_base, port_base) # Try remainder", "CodeMap( # Different options for handling host key verification #", "a name, return the code''' return self._fwd[name] def name(self, code):", "= printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for (%s)' % lookup_name) print('Expected:',", ":', actual) if self.mode == verify_mode.overwrite_blindly: print('Blindly accepting updated host", "= verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if mode", "key): '''Verify a single hostkey against a hostname or IP'''", "of the RadSSH software package. # # RadSSH is free", "verify_mode.ignore: return True # Special formatting for non-standard ports... if", "self.lock.release() return True else: # Missing key if self.mode ==", "to use, modify, and redsitribute this software # according to", "printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch for (%s)' % lookup_name) print('Expected:', expected)", "== verify_mode.prompt: print('Unverified connection to \"%s\"' % lookup_name) print('(Host Key", "conflicts no longer hinder connections # Using these options, you", "free software, released under the Revised BSD License. 
# You", "k, v in kwargs.items(): self.__setattr__(k, v) self._reverse[v] = k def", "# Deprecated as of 1.1 - Use known_hosts rewrite instead", "host key verification # Listed in decreasing order of security/paranoia", "file is part of the RadSSH software package. # #", "Missing key if self.mode == verify_mode.reject: self.lock.release() return False accept_and_add", "use radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap class''' def __init__(self, **kwargs):", "Deprecated as of 1.1 - Use known_hosts rewrite instead if", "decreasing order of security/paranoia reject=0, # Missing keys are rejected", "return True host_entry = self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if host_entry", "True if accept_and_add: print('Accepting new host key for %s' %", "key conflicts no longer hinder connections # Using these options,", "if isinstance(fingerprint[0], int): seq = [int(x) for x in fingerprint]", "hostname elif hostname.endswith(':22'): lookup_name = hostname[:-3] else: host_base, port_base =", "redsitribute this software # according to the Revised BSD License,", "if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True if accept_and_add:", "threading.Lock() if mode == verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file) if", "# After this point, key conflicts no longer hinder connections", "'''Convert key fingerprint into OpenSSH printable format''' fingerprint = k.get_fingerprint()", "verify_mode = CodeMap( # Different options for handling host key", "included with the distribution as file LICENSE.txt # '''HostKey Handling", "point, key conflicts no longer hinder connections # Using these", "elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key): '''Verify a", "the Revised BSD License, a copy of which should be", "no longer supported, and will be removed in 
release 2.0.", "self._reverse = {} for k, v in kwargs.items(): self.__setattr__(k, v)", "self.mode = verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if", "verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if mode ==", "in host_entry: # Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key", "warnings import paramiko.hostkeys # Deprecated as of 1.1 - Use", "verification with locking self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release() return True", "should be # included with the distribution as file LICENSE.txt", "self.lock.release() return False accept_and_add = False if self.mode == verify_mode.prompt:", "name(self, code): '''Given a code value, return the corresponding code'''", "2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc. #", "in fingerprint] else: seq = [ord(x) for x in fingerprint]", "def code(self, name): '''Given a name, return the code''' return", "accept_and_add: print('Accepting new host key for %s' % lookup_name) self.hostkeys.add(lookup_name,", "software # according to the Revised BSD License, a copy", "key if self.mode == verify_mode.reject: self.lock.release() return False accept_and_add =", "OpenSSH printable format''' fingerprint = k.get_fingerprint() # Handle Python3 bytes", "key): self.lock.release() return True host_entry = self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key)", "Revised BSD License, a copy of which should be #", "for SSH sessions, and you don't care. 
ignore=100, # Turn", "return self._fwd[name] def name(self, code): '''Given a code value, return", "key fingerprint into OpenSSH printable format''' fingerprint = k.get_fingerprint() #", "% lookup_name) print('(Host Key Fingerprint [%s])' % actual) answer =", "self.hostkeys = paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if mode == verify_mode.ignore:", "[%s])' % actual) answer = input('Do you want to accept", "printable_fingerprint(k): '''Convert key fingerprint into OpenSSH printable format''' fingerprint =", "self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True self.lock.release() return False", "answer = input('Do you want to accept this key? (y/N):", "self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname, key): '''Verify", "== verify_mode.reject: self.lock.release() return False accept_and_add = False if self.mode", "copy of which should be # included with the distribution", "= hostname[:-3] else: host_base, port_base = hostname.rsplit(':', 1) lookup_name =", "Key Fingerprint [%s])' % actual) answer = input('Do you want", "fingerprint = k.get_fingerprint() # Handle Python3 bytes or Python2 8-bit", "self._fwd = kwargs self._reverse = {} for k, v in", "# Listed in decreasing order of security/paranoia reject=0, # Missing", "# # Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk", "traffic for SSH sessions, and you don't care. 
ignore=100, #", "__init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys = paramiko.hostkeys.HostKeys() self.lock", "in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True if accept_and_add: print('Accepting new", "Handling Module''' import os import threading import warnings import paramiko.hostkeys", "are permitted to use, modify, and redsitribute this software #", "'[%s]:%s' % (host_base, port_base) # Try remainder of host verification", "return self._reverse[code] verify_mode = CodeMap( # Different options for handling", "removed in release 2.0. Port existing code to use radssh.known_hosts", "lookup_name) print('(Host Key Fingerprint [%s])' % actual) answer = input('Do", "**kwargs): self._fwd = kwargs self._reverse = {} for k, v", "for x in fingerprint] else: seq = [ord(x) for x", "[int(x) for x in fingerprint] else: seq = [ord(x) for", "bytes or Python2 8-bit string style... if isinstance(fingerprint[0], int): seq", "verify_mode.overwrite_blindly: print('Blindly accepting updated host key for %s' % lookup_name)", "hostname: lookup_name = hostname elif hostname.endswith(':22'): lookup_name = hostname[:-3] else:", "intercepted traffic for SSH sessions, and you don't care. 
ignore=100,", "with the distribution as file LICENSE.txt # '''HostKey Handling Module'''", "print('Accepting new host key for %s' % lookup_name) self.hostkeys.add(lookup_name, key.get_name(),", "API warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will", "according to the Revised BSD License, a copy of which", "of which should be # included with the distribution as", "LICENSE.txt # '''HostKey Handling Module''' import os import threading import", "key mismatch for (%s)' % lookup_name) print('Expected:', expected) print('Got :',", "or IP''' if self.mode == verify_mode.ignore: return True # Special", "':'.join(['%02x' % x for x in seq]) class HostKeyVerifier(object): '''Class", "answer[0].upper() == 'Y': accept_and_add = True if self.mode in (verify_mode.accept_new,", "= True if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True", "8-bit string style... if isinstance(fingerprint[0], int): seq = [int(x) for", "are verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys", "input('Do you want to accept this key? 
(y/N): ') if", "'''HostKey Handling Module''' import os import threading import warnings import", "a code value, return the corresponding code''' return self._reverse[code] verify_mode", "radssh.known_hosts instead.')) class CodeMap(object): '''CodeMap class''' def __init__(self, **kwargs): self._fwd", "for handling host key verification # Listed in decreasing order", "connections # Using these options, you become vulnerable to spoofing", "code): '''Given a code value, return the corresponding code''' return", "self._reverse[v] = k def code(self, name): '''Given a name, return", "= False if self.mode == verify_mode.prompt: print('Unverified connection to \"%s\"'", "lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True self.lock.release() return", "and key.get_name() in host_entry: # Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()])", "string style... if isinstance(fingerprint[0], int): seq = [int(x) for x", "verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not", "evil ) def printable_fingerprint(k): '''Convert key fingerprint into OpenSSH printable", "fingerprint into OpenSSH printable format''' fingerprint = k.get_fingerprint() # Handle", "return True # Special formatting for non-standard ports... 
if ':'", "if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self, hostname,", "== 'Y': accept_and_add = True if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly):", "a single hostkey against a hostname or IP''' if self.mode", "using this API warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported,", "keys are rejected prompt=1, # Missing keys may be accepted,", "# # This file is part of the RadSSH software", "Inc. # # This file is part of the RadSSH", "this software # according to the Revised BSD License, a", "Using these options, you become vulnerable to spoofing and #", "x for x in seq]) class HostKeyVerifier(object): '''Class to control", "# # RadSSH is free software, released under the Revised", "if self.mode == verify_mode.overwrite_blindly: print('Blindly accepting updated host key for", "import threading import warnings import paramiko.hostkeys # Deprecated as of", "== verify_mode.overwrite_blindly: print('Blindly accepting updated host key for %s' %", "keys are verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode)", "hostkey module is no longer supported, and will be removed", "self.mode == verify_mode.overwrite_blindly: print('Blindly accepting updated host key for %s'", "actual = printable_fingerprint(key) if host_entry and key.get_name() in host_entry: #", "self._fwd[name] def name(self, code): '''Given a code value, return the", "these options, you become vulnerable to spoofing and # intercepted", "= kwargs self._reverse = {} for k, v in kwargs.items():", "security/paranoia reject=0, # Missing keys are rejected prompt=1, # Missing", "locking self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release() return True host_entry =", "formatting for non-standard 
ports... if ':' not in hostname: lookup_name", "prompt accept_new=2, # Missing keys automatically accepted # After this", "self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add = True if accept_and_add: print('Accepting", "hostkey against a hostname or IP''' if self.mode == verify_mode.ignore:", "hostname[:-3] else: host_base, port_base = hostname.rsplit(':', 1) lookup_name = '[%s]:%s'", "def verify_host_key(self, hostname, key): '''Verify a single hostkey against a", "verify_mode.reject: self.lock.release() return False accept_and_add = False if self.mode ==", "'''Verify a single hostkey against a hostname or IP''' if", "2.0. Port existing code to use radssh.known_hosts instead.')) class CodeMap(object):", "return True else: # Missing key if self.mode == verify_mode.reject:", "return self.known_hosts_file = os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)):", "part of the RadSSH software package. # # RadSSH is", "are rejected prompt=1, # Missing keys may be accepted, based", "hostname, key): '''Verify a single hostkey against a hostname or", "with locking self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release() return True host_entry", "RadSSH is free software, released under the Revised BSD License.", "host_entry: # Entry mismatch expected = printable_fingerprint(host_entry[key.get_name()]) print('Host key mismatch", "longer supported, and will be removed in release 2.0. Port", "will be removed in release 2.0. 
Port existing code to", "for x in fingerprint] return ':'.join(['%02x' % x for x", "') if answer[0].upper() == 'Y': accept_and_add = True if self.mode", "'Y': accept_and_add = True if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly): accept_and_add", "actual) answer = input('Do you want to accept this key?", "License, a copy of which should be # included with", "print('Blindly accepting updated host key for %s' % lookup_name) self.hostkeys.add(lookup_name,", "':' not in hostname: lookup_name = hostname elif hostname.endswith(':22'): lookup_name", "IP''' if self.mode == verify_mode.ignore: return True # Special formatting", "permitted to use, modify, and redsitribute this software # according", "Listed in decreasing order of security/paranoia reject=0, # Missing keys", "BSD License, a copy of which should be # included", "True host_entry = self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if host_entry and", "expected) print('Got :', actual) if self.mode == verify_mode.overwrite_blindly: print('Blindly accepting", "isinstance(fingerprint[0], int): seq = [int(x) for x in fingerprint] else:", "host key verification OFF overwrite_blindly=666 # Concentrated evil ) def", "if self.hostkeys.check(lookup_name, key): self.lock.release() return True host_entry = self.hostkeys.lookup(lookup_name) actual", "(host_base, port_base) # Try remainder of host verification with locking", "def name(self, code): '''Given a code value, return the corresponding", "Turn host key verification OFF overwrite_blindly=666 # Concentrated evil )", "accepted # After this point, key conflicts no longer hinder", "a copy of which should be # included with the", "1.1 - Use known_hosts rewrite instead if using this API", "self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else: # Missing key if self.mode", "x in fingerprint] else: seq = [ord(x) for x in", "the Revised BSD License. 
# You are permitted to use,", "= os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def", "= self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if host_entry and key.get_name() in", "verify_mode.prompt: print('Unverified connection to \"%s\"' % lookup_name) print('(Host Key Fingerprint", "# Special formatting for non-standard ports... if ':' not in", "= paramiko.hostkeys.HostKeys() self.lock = threading.Lock() if mode == verify_mode.ignore: return", "self.lock.release() return True host_entry = self.hostkeys.lookup(lookup_name) actual = printable_fingerprint(key) if", "options for handling host key verification # Listed in decreasing", "self.mode == verify_mode.prompt: print('Unverified connection to \"%s\"' % lookup_name) print('(Host", "Missing keys automatically accepted # After this point, key conflicts", "'''Given a code value, return the corresponding code''' return self._reverse[code]", "(if) host keys are verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode", "in decreasing order of security/paranoia reject=0, # Missing keys are", "for (%s)' % lookup_name) print('Expected:', expected) print('Got :', actual) if", "x in fingerprint] return ':'.join(['%02x' % x for x in", "CodeMap(object): '''CodeMap class''' def __init__(self, **kwargs): self._fwd = kwargs self._reverse", "True # Special formatting for non-standard ports... 
if ':' not", "verification # Listed in decreasing order of security/paranoia reject=0, #", "for x in seq]) class HostKeyVerifier(object): '''Class to control how", "port_base) # Try remainder of host verification with locking self.lock.acquire()", "against a hostname or IP''' if self.mode == verify_mode.ignore: return", "os.path.expanduser(known_hosts_file) if os.path.exists(self.known_hosts_file): self.hostkeys.load(self.known_hosts_file) elif not os.path.exists(os.path.dirname(self.known_hosts_file)): os.makedirs(os.path.dirname(self.known_hosts_file)) def verify_host_key(self,", "if using this API warnings.warn(FutureWarning('RadSSH hostkey module is no longer", "verified''' def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'): self.mode = verify_mode.code(mode) self.hostkeys =", "= {} for k, v in kwargs.items(): self.__setattr__(k, v) self._reverse[v]", "vulnerable to spoofing and # intercepted traffic for SSH sessions,", "self.mode == verify_mode.reject: self.lock.release() return False accept_and_add = False if", "= threading.Lock() if mode == verify_mode.ignore: return self.known_hosts_file = os.path.expanduser(known_hosts_file)", "this API warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and", "modify, and redsitribute this software # according to the Revised", "you want to accept this key? (y/N): ') if answer[0].upper()", "for non-standard ports... 
if ':' not in hostname: lookup_name =", "warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will be", "to spoofing and # intercepted traffic for SSH sessions, and", "% lookup_name) print('Expected:', expected) print('Got :', actual) if self.mode ==", "if self.mode == verify_mode.reject: self.lock.release() return False accept_and_add = False", "self.hostkeys.check(lookup_name, key): self.lock.release() return True host_entry = self.hostkeys.lookup(lookup_name) actual =", "not in hostname: lookup_name = hostname elif hostname.endswith(':22'): lookup_name =", "non-standard ports... if ':' not in hostname: lookup_name = hostname", "if self.mode == verify_mode.ignore: return True # Special formatting for", "# Missing keys are rejected prompt=1, # Missing keys may", "of host verification with locking self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release()", "HostKeyVerifier(object): '''Class to control how (if) host keys are verified'''", "supported, and will be removed in release 2.0. Port existing", "'''CodeMap class''' def __init__(self, **kwargs): self._fwd = kwargs self._reverse =", "Use known_hosts rewrite instead if using this API warnings.warn(FutureWarning('RadSSH hostkey", "is free software, released under the Revised BSD License. #", "file LICENSE.txt # '''HostKey Handling Module''' import os import threading", "return the corresponding code''' return self._reverse[code] verify_mode = CodeMap( #", "mismatch for (%s)' % lookup_name) print('Expected:', expected) print('Got :', actual)", "# Different options for handling host key verification # Listed", "reject=0, # Missing keys are rejected prompt=1, # Missing keys", "return False accept_and_add = False if self.mode == verify_mode.prompt: print('Unverified", "int): seq = [int(x) for x in fingerprint] else: seq", "2020 LexisNexis Risk Data Management Inc. 
# # This file", "as of 1.1 - Use known_hosts rewrite instead if using", "True else: # Missing key if self.mode == verify_mode.reject: self.lock.release()", "False accept_and_add = False if self.mode == verify_mode.prompt: print('Unverified connection", "# Handle Python3 bytes or Python2 8-bit string style... if", "use, modify, and redsitribute this software # according to the", "else: seq = [ord(x) for x in fingerprint] return ':'.join(['%02x'", "self.lock.acquire() if self.hostkeys.check(lookup_name, key): self.lock.release() return True host_entry = self.hostkeys.lookup(lookup_name)", "printable_fingerprint(key) if host_entry and key.get_name() in host_entry: # Entry mismatch", "of 1.1 - Use known_hosts rewrite instead if using this", "be removed in release 2.0. Port existing code to use", "keys may be accepted, based on user prompt accept_new=2, #", "distribution as file LICENSE.txt # '''HostKey Handling Module''' import os", "this point, key conflicts no longer hinder connections # Using", "accept_and_add = True if accept_and_add: print('Accepting new host key for", "else: # Missing key if self.mode == verify_mode.reject: self.lock.release() return", "hostname or IP''' if self.mode == verify_mode.ignore: return True #", "sessions, and you don't care. ignore=100, # Turn host key", "% lookup_name) self.hostkeys.add(lookup_name, key.get_name(), key) self.hostkeys.save(self.known_hosts_file) self.lock.release() return True else:", "v) self._reverse[v] = k def code(self, name): '''Given a name," ]
[ "h1 = ( h1 << 13 | h1 >> 19", "& 0xFFFFFFFF # tail tail_index = nblocks * 4 k1", "] << 8 | \\ key[ block_start + 8 ]", "0xFFFFFFFF k4 = ( k4 << 18 | k4 >>", "= key[ 2 * block_start + 7 ] << 56", "& 0xFFFFFFFF k2 = ( c2 * k2 ) &", "tail_index + 5 ] << 40 if tail_size >= 5:", "| h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32", "+ 13 ] << 8 | \\ key[ block_start +", "inlined ROTL32 k4 = ( k4 * c1 ) &", "k2 ^= key[ tail_index + 14 ] << 48 if", "block_start + 15 ] << 24 | \\ key[ block_start", "& 0xFF bytestring = bytestring + str( chr( lsbyte )", "h >> 16 h = ( h * 0x85ebca6b )", ") & 0xFFFFFFFF h ^= h >> 13 h =", "module is written to have the same format as mmh3", "& 0xFFFFFFFFFFFFFFFF h1 ^= k1 h1 = ( h1 <<", "<< 16 | \\ key[ 2 * block_start + 1", "2 * block_start + 9 ] << 8 | \\", "0xFFFFFFFF # tail tail_index = nblocks * 4 k1 =", "murmur3 hash. Returns a byte string. ''' hash_128 = hash128(", "if x64arch: return hash128_x64( key, seed ) else: return hash128_x86(", "0xFFFFFFFF h1 ^= k1 #finalization h1 ^= length h2 ^=", "0xFFFFFFFF h2 = ( h2 * 5 + 0x0bcaa747 )", "lsbyte = hash_128 & 0xFF bytestring = bytestring + str(", "* block_start + 12 ] << 32 | \\ key[", "] k2 = key[ block_start + 7 ] << 24", "block_start + 0 ] k1 = ( c1 * k1", "= ( k2 * c2 ) & 0xFFFFFFFF k2 =", "= True ): ''' Implements 128bit murmur3 hash. ''' def", "k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1", "= bytestring + str( chr( lsbyte ) ) hash_128 =", "'pymurmur [options] \"string to hash\"' ) parser.add_argument( '--seed', type =", "= ( h1 + h2 ) & 0xFFFFFFFF h3 =", "same format as mmh3 python package found here for simple", ") def hash_bytes( key, seed = 0x0, x64arch = True", "48 if tail_size >= 6: k1 ^= key[ tail_index +", "^= k2 h2 = ( h2 << 31 | h2", "k1 * c2 ) & 0xFFFFFFFFFFFFFFFF h1 ^= k1 #finalization", "5: k2 ^= key[ tail_index + 4 ] if tail_size", "k1 #finalization h1 ^= length h2 ^= length h1 =", "] << 16 if tail_size >= 10: k3 ^= key[", "the public domain. 
The authors hereby disclaim copyright to this", "c1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1 << 31", "+ h3 ) & 0xFFFFFFFF h1 = ( h1 +", "& 0xFFFFFFFF h4 ^= k4 h4 = ( h4 <<", "14: k4 ^= key[ tail_index + 13 ] << 8", "tail_index + 13 ] << 40 if tail_size >= 13:", "2 * block_start + 0 ] k2 = key[ 2", "c2 * k1 ) & 0xFFFFFFFF h1 ^= k1 h1", "block_start + 4 ] k3 = key[ block_start + 11", "h1 = ( h1 << 27 | h1 >> 37", "( k3 << 17 | k3 >> 15 ) &", "'strings', default = [], nargs='+') opts = parser.parse_args() for str_to_hash", "block_start + 11 ] << 24 | \\ key[ block_start", ">= 2: k1 ^= key[ tail_index + 1 ] <<", "k2 ^= key[ tail_index + 13 ] << 40 if", "+ 12 ] << 32 if tail_size >= 12: k2", "block_start + 11 ] << 24 | \\ key[ 2", ") & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 = ( h1", ") & 0xFFFFFFFF h1 ^= k1 #finalization unsigned_val = fmix(", "] k4 = key[ block_start + 15 ] << 24", "k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF h2", "& 0xFFFFFFFFFFFFFFFF k1 = ( k1 << 31 | k1", "5 ] << 40 | \\ key[ 2 * block_start", "0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 = ( h1 + h2", "tail_index + 14 ] << 48 if tail_size >= 14:", "64 | h1 ) def hash128_x86( key, seed ): '''", "tail_size >= 7: k1 ^= key[ tail_index + 6 ]", "k4 * c1 ) & 0xFFFFFFFF h4 ^= k4 if", "= ( c2 * k1 ) & 0xFFFFFFFF h1 ^=", "h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF #tail tail_index", "return x.encode() else: def xencode(x): return x del _sys def", "<< 24 | \\ key[ 2 * block_start + 10", "0xFFFFFFFF # inlined ROTL32 h1 = ( h1 * 5", "= key[ block_start + 3 ] << 24 | \\", "0 tail_size = length & 15 if tail_size >= 15:", "0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000 == 0: signed_val1 = unsigned_val1", "mmh3 python package found here for simple conversions: https://pypi.python.org/pypi/mmh3/2.3.1 '''", "return x else: return x.encode() else: def xencode(x): return x", "k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^= k >>", "for str_to_hash in opts.strings: sys.stdout.write( '\"%s\" = 0x%08X\\n' % (", "= hash_128 & 
0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000 == 0:", "+ 0 ] if tail_size > 0: k1 = (", "placed in the public domain. The authors hereby disclaim copyright", "nblocks * 4 k1 = 0 tail_size = length &", "& 3 if tail_size >= 3: k1 ^= key[ tail_index", "<< 16 if tail_size >= 2: k1 ^= key[ tail_index", "nblocks * 4, 4 ): # ??? big endian? k1", "key[ 2 * block_start + 2 ] << 16 |", "= ( h1 << 13 | h1 >> 19 )", "3: k1 ^= key[ tail_index + 2 ] << 16", "\\ key[ 2 * block_start + 3 ] << 24", "^= key[ tail_index + 2 ] << 16 if tail_size", "^= key[ tail_index + 12 ] if tail_size > 12:", "block_start + 7 ] << 56 | \\ key[ 2", "c4 ) & 0xFFFFFFFF k4 = ( k4 << 18", "] << 16 | \\ key[ block_start + 5 ]", "seed = 0x0, x64arch = True ): ''' Implements 128bit", "] << 24 if tail_size >= 11: k3 ^= key[", "16 if tail_size >= 2: k1 ^= key[ tail_index +", "c2 ) & 0xFFFFFFFF k2 = ( k2 << 16", "0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 return", "* k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 h2 =", "= 0xcc9e2d51 c2 = 0x1b873593 # body for block_start in", "<< 32 | \\ key[ 2 * block_start + 11", "& 0xFFFFFFFFFFFFFFFF h1 = ( h1 * 5 + 0x52dce729", "key[ tail_index + 5 ] << 8 if tail_size >=", "inlined ROTL32 h2 = ( h2 + h3 ) &", "0xa1e38b93 #body for block_start in range( 0, nblocks * 16,", "for block_start in range( 0, nblocks * 16, 16 ):", "& 0xFFFFFFFF h4 = ( h4 * 5 + 0x32ac3b17", ") h3 = fmix( h3 ) h4 = fmix( h4", "'pymurmur3', 'pymurmur [options] \"string to hash\"' ) parser.add_argument( '--seed', type", "k1 #finalization unsigned_val = fmix( h1 ^ length ) if", "] << 48 | \\ key[ 2 * block_start +", "^= length h2 ^= length h3 ^= length h4 ^=", "* c4 ) & 0xFFFFFFFF k4 = ( k4 <<", "tail_size >= 7: k2 ^= key[ tail_index + 6 ]", "h3 ) & 0xFFFFFFFF h1 = ( h1 + h4", "48 | \\ key[ 2 * block_start + 13 ]", "8 | \\ key[ block_start + 4 ] k3 =", "k2 ^= key[ tail_index + 6 ] << 16 if", "''' Implements 32bit murmur3 hash. 
''' key = bytearray( xencode(key)", "h2 = seed h3 = seed h4 = seed c1", "^= k2 h2 = ( h2 << 17 | h2", "^= k1 h1 = ( h1 << 13 | h1", "= 0 k2 = 0 tail_size = length & 15", "<< 16 | k2 >> 16 ) & 0xFFFFFFFF #", "^= length h3 ^= length h4 ^= length h1 =", ") def fmix( h ): h ^= h >> 16", "murmur3 hash. Returns a tuple. ''' hash_128 = hash128( key,", "<< 8 if tail_size >= 13: k4 ^= key[ tail_index", ") & 0xFFFFFFFF # inlined ROTL32 h4 = ( h1", "block_start + 6 ] << 16 | \\ key[ block_start", "compile c-code and install modules, and you only want a", "drop-in murmur3 implementation. As this is purely python it is", "= hash128( key, seed, x64arch ) unsigned_val1 = hash_128 &", "9 ] << 8 if tail_size >= 9: k2 ^=", ") & 0xFFFFFFFF # inlined ROTL32 k1 = ( k1", "k4 = ( k4 * c4 ) & 0xFFFFFFFF k4", "h length = len( key ) nblocks = int( length", "tail tail_index = nblocks * 4 k1 = 0 tail_size", "\\ key[ block_start + 4 ] k3 = key[ block_start", "is purely python it is FAR from performant and if", "17 ) & 0xFFFFFFFF # inlined ROTL32 h3 = (", "<< 32 if tail_size >= 12: k2 ^= key[ tail_index", "33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h2 = (", "tail_size >= 15: k2 ^= key[ tail_index + 14 ]", "10 ] << 16 if tail_size >= 10: k3 ^=", "= ( h2 << 31 | h2 >> 33 )", ">> 15 ) & 0xFFFFFFFF # inlined ROTL32 k3 =", "in range( 0, nblocks * 8, 8 ): # ???", "enhanced by <NAME>, and is placed in the public domain.", "* k1 ) & 0xFFFFFFFF h1 ^= k1 h1 =", "= ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF k1 =", "& 0xFFFFFFFF h2 ^= k2 h2 = ( h2 <<", "k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF", "11 ] << 24 if tail_size >= 11: k3 ^=", "* 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33", "return k length = len( key ) nblocks = int(", "block_start + 9 ] << 8 | \\ key[ 2", "11: k3 ^= key[ tail_index + 10 ] << 16", "\\ key[ 2 * block_start + 4 ] << 32", "fmix( h1 ^ length ) if unsigned_val & 0x80000000 ==", "block_start in range( 0, nblocks * 16, 16 ): k1", "): return list(range( a, b, c)) def xencode(x): if 
isinstance(x,", "<< 8 | \\ key[ block_start + 4 ] k3", "( h1 << 13 | h1 >> 19 ) &", "+ h2 ) & 0xFFFFFFFFFFFFFFFF return ( h2 << 64", "): ''' Implements 128bit murmur3 hash. Returns a byte string.", "] << 24 | \\ key[ block_start + 14 ]", "2 * block_start + 12 ] << 32 | \\", "0xFFFFFFFF h3 ^= k3 h3 = ( h3 << 15", ") & 0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2 )", "# inlined ROTL32 h4 = ( h1 + h4 )", "& 0xFFFFFFFF h3 = ( h1 + h3 ) &", "length h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF", "] << 48 if tail_size >= 14: k2 ^= key[", "= key[ block_start + 7 ] << 24 | \\", "( h4 << 96 | h3 << 64 | h2", "^= h >> 16 return h length = len( key", "56 if tail_size >= 7: k1 ^= key[ tail_index +", "length h3 ^= length h4 ^= length h1 = (", "if tail_size >= 9: k2 ^= key[ tail_index + 8", "h1 ^= k1 h1 = ( h1 << 13 |", "return hash128_x64( key, seed ) else: return hash128_x86( key, seed", "[options] \"string to hash\"' ) parser.add_argument( '--seed', type = int,", "0xFFFFFFFF # inlined ROTL32 k4 = ( c1 * k4", ") if x64arch: return hash128_x64( key, seed ) else: return", "pure python implementation of the murmur3 hash algorithm https://code.google.com/p/smhasher/wiki/MurmurHash3 This", "0 tail_size = length & 3 if tail_size >= 3:", "0 k2 = 0 k3 = 0 k4 = 0", "tail_size >= 6: k2 ^= key[ tail_index + 5 ]", "/ 4 ) h1 = seed c1 = 0xcc9e2d51 c2", "+ 6 ] << 48 | \\ key[ 2 *", "\\ key[ 2 * block_start + 11 ] << 24", "a, b, c)) def xencode(x): if isinstance(x, bytes) or isinstance(x,", "| \\ key[ 2 * block_start + 4 ] <<", "6 ] << 48 | \\ key[ 2 * block_start", "0x4cf5ad432745937f #body for block_start in range( 0, nblocks * 8,", "= 0x87c37b91114253d5 c2 = 0x4cf5ad432745937f #body for block_start in range(", "= ( c1 * k1 ) & 0xFFFFFFFF k1 =", "h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 = ( h1", "not want to compile c-code and install modules, and you", "& 0x8000000000000000 == 0: signed_val2 = unsigned_val2 else: signed_val2 =", "Implements 128bit murmur3 hash. Returns a byte string. 
''' hash_128", "+ 0xe6546b64 ) & 0xFFFFFFFF # tail tail_index = nblocks", "<< 24 if tail_size >= 7: k2 ^= key[ tail_index", "+ 0x0bcaa747 ) & 0xFFFFFFFF k3 = ( c3 *", "ROTL64 k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF", "> 4: k2 = ( k2 * c2 ) &", "= fmix( h2 ) h3 = fmix( h3 ) h4", ">= 15: k2 ^= key[ tail_index + 14 ] <<", "0x38b34ae5 c4 = 0xa1e38b93 #body for block_start in range( 0,", "33 return k length = len( key ) nblocks =", "24 if tail_size >= 7: k2 ^= key[ tail_index +", "The authors hereby disclaim copyright to this source code. pure", "( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF #", "k2 * c2 ) & 0xFFFFFFFFFFFFFFFF k2 = ( k2", "> 8: k2 = ( k2 * c2 ) &", "12 ] << 32 if tail_size >= 12: k2 ^=", "( k4 << 18 | k4 >> 14 ) &", "+ 10 ] << 16 | \\ key[ 2 *", "0xFFFFFFFF h4 = ( h1 + h4 ) & 0xFFFFFFFF", "ROTL32 k4 = ( c1 * k4 ) & 0xFFFFFFFF", "length / 4 ) h1 = seed c1 = 0xcc9e2d51", "h1 + h4 ) & 0xFFFFFFFF h2 = ( h1", "in range(0, 16, 1): lsbyte = hash_128 & 0xFF bytestring", "16 | \\ key[ block_start + 5 ] << 8", "key[ 2 * block_start + 10 ] << 16 |", ") & 0xFFFFFFFFFFFFFFFF h1 ^= k1 h1 = ( h1", "c3 = 0x38b34ae5 c4 = 0xa1e38b93 #body for block_start in", "inlined ROTL64 h1 = ( h1 + h2 ) &", "k2 ^= key[ tail_index + 5 ] << 8 if", "k2 = key[ block_start + 7 ] << 24 |", "k3 * c4 ) & 0xFFFFFFFF h3 ^= k3 if", "= ( h2 << 17 | h2 >> 15 )", "+ str( chr( lsbyte ) ) hash_128 = hash_128 >>", "This module is written to have the same format as", "+ 2 ] << 16 | \\ key[ block_start +", "0xc2b2ae35 ) & 0xFFFFFFFF h ^= h >> 16 return", "block_start + 12 ] k1 = ( c1 * k1", ") & 0xFFFFFFFFFFFFFFFF h1 = ( h1 * 5 +", "h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF h", "& 0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2 ) &", "( c1 * k1 ) & 0xFFFFFFFF k1 = (", "<< 24 | \\ key[ block_start + 14 ] <<", ") & 0xFFFFFFFF #tail tail_index = nblocks * 16 k1", "( h1 << 27 | h1 >> 37 ) &", "import sys as _sys if (_sys.version_info > (3, 0)): def", "+ 0x561ccd1b ) & 0xFFFFFFFF k2 = ( c2 *", "for simple conversions: 
https://pypi.python.org/pypi/mmh3/2.3.1 ''' import sys as _sys if", "^= h >> 13 h = ( h * 0xc2b2ae35", "<< 13 | h1 >> 19 ) & 0xFFFFFFFF #", "16 ): k1 = key[ block_start + 3 ] <<", "= ( c1 * k4 ) & 0xFFFFFFFF h4 ^=", "= fmix( h1 ^ length ) if unsigned_val & 0x80000000", "murmur3 hash. ''' key = bytearray( xencode(key) ) def fmix(", "| h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32", "5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF #tail tail_index = nblocks", "33 k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF", "block_start + 0 ] k2 = key[ 2 * block_start", "seed, x64arch ) unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF if unsigned_val1", "& 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 = ( h1 +", "<< 15 | h3 >> 17 ) & 0xFFFFFFFF #", "h2 = ( h2 * 5 + 0x0bcaa747 ) &", "c1 = 0x239b961b c2 = 0xab0e9789 c3 = 0x38b34ae5 c4", "ROTL32 k2 = ( k2 * c3 ) & 0xFFFFFFFF", "+ 0 ] k2 = key[ 2 * block_start +", "13 ] << 40 | \\ key[ 2 * block_start", "inlined ROTL64 k1 = ( k1 * c2 ) &", "( k1 * c2 ) & 0xFFFFFFFF h1 ^= k1", "] << 8 if tail_size >= 13: k4 ^= key[", "''' hash_128 = hash128( key, seed, x64arch ) unsigned_val1 =", "k length = len( key ) nblocks = int( length", "0xFFFFFFFF h2 ^= k2 if tail_size >= 4: k1 ^=", "= ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 =", "0xcc9e2d51 c2 = 0x1b873593 # body for block_start in range(", "tail_index + 12 ] if tail_size > 12: k4 =", "byte string. 
''' hash_128 = hash128( key, seed, x64arch )", "bytestring = bytestring + str( chr( lsbyte ) ) hash_128", "times when you do not want to compile c-code and", "if performance is anything that is needed a proper c-module", "= length & 15 if tail_size >= 15: k2 ^=", "k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF", "if (_sys.version_info > (3, 0)): def xrange( a, b, c", "0x0, x64arch = True ): ''' Implements 128bit murmur3 hash.", "import argparse parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] \"string to", "+ 12 ] k1 = ( c1 * k1 )", "16 | \\ key[ 2 * block_start + 9 ]", "| \\ key[ 2 * block_start + 11 ] <<", "h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF", "block_start + 1 ] << 8 | \\ key[ block_start", ">= 9: k2 ^= key[ tail_index + 8 ] if", "unsigned_val1 else: signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1", ") & 0xFFFFFFFF # inlined ROTL32 k3 = ( k3", "0x8000000000000000 == 0: signed_val1 = unsigned_val1 else: signed_val1 = -(", "pymmh3 was written by <NAME> and enhanced by <NAME>, and", "performant and if performance is anything that is needed a", "1 ) unsigned_val2 = ( hash_128 >> 64 ) &", "^= key[ tail_index + 5 ] << 40 if tail_size", "* c1 ) & 0xFFFFFFFF h4 ^= k4 if tail_size", "4, 4 ): # ??? big endian? 
k1 = key[", "15 ] << 56 | \\ key[ 2 * block_start", "h1 = seed h2 = seed c1 = 0x87c37b91114253d5 c2", ") & 0xFFFFFFFF return ( h4 << 96 | h3", "= ( k2 * c3 ) & 0xFFFFFFFF h2 ^=", "14 ] << 16 if tail_size >= 14: k4 ^=", "h1 + h2 ) & 0xFFFFFFFFFFFFFFFF return ( h2 <<", "31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined", "= ( k4 * c1 ) & 0xFFFFFFFF h4 ^=", "h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF return", "& 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h2 = ( h1 +", "h2 ) & 0xFFFFFFFF h1 = ( h1 * 5", "h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF #tail tail_index", "* block_start + 11 ] << 24 | \\ key[", "<< 15 | k1 >> 17 ) & 0xFFFFFFFF #", "x del _sys def hash( key, seed = 0x0 ):", "56 | \\ key[ 2 * block_start + 6 ]", ") & 0xFFFFFFFFFFFFFFFF if unsigned_val2 & 0x8000000000000000 == 0: signed_val2", "+ 0 ] k1 = ( c1 * k1 )", "k1 ^= key[ tail_index + 4 ] << 32 if", "( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF k1 = (", "tail_size = length & 15 if tail_size >= 15: k4", "hash_128 & 0xFF bytestring = bytestring + str( chr( lsbyte", "if tail_size >= 7: k1 ^= key[ tail_index + 6", "15 if tail_size >= 15: k4 ^= key[ tail_index +", "<NAME>, and is placed in the public domain. The authors", "2 * block_start + 11 ] << 24 | \\", "32 | h1 ) key = bytearray( xencode(key) ) if", "8 | \\ key[ block_start + 8 ] k4 =", "h3 = ( h3 << 15 | h3 >> 17", "56 | \\ key[ 2 * block_start + 14 ]", "] << 24 | \\ key[ block_start + 6 ]", "murmur3 implementation. 
As this is purely python it is FAR", "= key[ block_start + 11 ] << 24 | \\", "> (3, 0)): def xrange( a, b, c ): return", "2 * block_start + 2 ] << 16 | \\", "h2 = ( h2 * 5 + 0x38495ab5 ) &", "h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2", "opts = parser.parse_args() for str_to_hash in opts.strings: sys.stdout.write( '\"%s\" =", "( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF #tail", "in range( 0, nblocks * 16, 16 ): k1 =", "^= length h4 ^= length h1 = ( h1 +", "32 | \\ key[ 2 * block_start + 3 ]", ") & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2 = ( k2", "# body for block_start in range( 0, nblocks * 4,", ">> 33 return k length = len( key ) nblocks", ">> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 =", "sys.stdout.write( '\"%s\" = 0x%08X\\n' % ( str_to_hash, hash( str_to_hash )", "* 5 + 0x32ac3b17 ) & 0xFFFFFFFF #tail tail_index =", "<NAME> and enhanced by <NAME>, and is placed in the", ") & 0xFFFFFFFF h1 ^= k1 h1 = ( h1", "* k3 ) & 0xFFFFFFFF h3 ^= k3 h3 =", ">= 7: k2 ^= key[ tail_index + 6 ] <<", "+ 10 ] << 16 if tail_size >= 10: k2", "type = int, default = 0 ) parser.add_argument( 'strings', default", "32bit murmur3 hash. ''' key = bytearray( xencode(key) ) def", "13 ] << 40 if tail_size >= 13: k2 ^=", "h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32 h4", "<< 8 | \\ key[ block_start + 0 ] k2", "k2 ^= key[ tail_index + 11 ] << 24 if", "= ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF return (", "0xFFFFFFFF # inlined ROTL32 k3 = ( k3 * c4", "h4 << 96 | h3 << 64 | h2 <<", "h4 ) & 0xFFFFFFFF return ( h4 << 96 |", "if isinstance(x, bytes) or isinstance(x, bytearray): return x else: return", "] << 16 if tail_size >= 2: k1 ^= key[", "written for the times when you do not want to", "c4 ) & 0xFFFFFFFF h3 ^= k3 if tail_size >=", ") & 0xFFFFFFFF h1 = fmix( h1 ) h2 =", "| \\ key[ 2 * block_start + 10 ] <<", "hash. Returns a byte string. 
''' hash_128 = hash128( key,", "h4 = ( h1 + h4 ) & 0xFFFFFFFF h4", "( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2", "lsbyte ) ) hash_128 = hash_128 >> 8 return bytestring", "& 0xFFFFFFFFFFFFFFFF k2 = ( c2 * k2 ) &", "<< 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF #", "& 0x80000000 == 0: return unsigned_val else: return -( (unsigned_val", "0xab0e9789 c3 = 0x38b34ae5 c4 = 0xa1e38b93 #body for block_start", "k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF", "): k ^= k >> 33 k = ( k", "inlined ROTL32 k3 = ( k3 * c4 ) &", "\\ key[ block_start + 13 ] << 8 | \\", "= 0 ) parser.add_argument( 'strings', default = [], nargs='+') opts", "0 k4 = 0 tail_size = length & 15 if", "key[ tail_index + 12 ] << 32 if tail_size >=", "^= length h1 = ( h1 + h2 ) &", "32 | \\ key[ 2 * block_start + 11 ]", "signed_val2 = unsigned_val2 else: signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF)", "tail_size >= 6: k1 ^= key[ tail_index + 5 ]", "== 0: return unsigned_val else: return -( (unsigned_val ^ 0xFFFFFFFF)", "block_start + 10 ] << 16 | \\ key[ 2", "0, nblocks * 16, 16 ): k1 = key[ block_start", "block_start + 0 ] k2 = key[ block_start + 7", "19 ) & 0xFFFFFFFF # inlined ROTL32 h4 = (", "tail_index + 8 ] if tail_size > 8: k3 =", "24 | \\ key[ block_start + 10 ] << 16", "k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k", "= ( k1 * c1 ) & 0xFFFFFFFF k1 =", "0: signed_val1 = unsigned_val1 else: signed_val1 = -( (unsigned_val1 ^", "key[ tail_index + 6 ] << 16 if tail_size >=", "] << 8 if tail_size >= 9: k3 ^= key[", "h2 = ( h2 << 31 | h2 >> 33", "if tail_size >= 15: k2 ^= key[ tail_index + 14", "+ 0x96cd1c35 ) & 0xFFFFFFFF k4 = ( c4 *", "k2 if tail_size >= 4: k1 ^= key[ tail_index +", "8: k2 ^= key[ tail_index + 7 ] << 24", "= len( key ) nblocks = int( length / 16", "0: k1 = ( k1 * c1 ) & 0xFFFFFFFF", "and you only want a drop-in murmur3 implementation. 
As this", "= ( h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^=", "0xFFFFFFFFFFFFFFFF k1 = ( k1 << 31 | k1 >>", "+ 1 ) unsigned_val2 = ( hash_128 >> 64 )", "key[ tail_index + 8 ] if tail_size > 8: k2", "2 ] << 16 | \\ key[ 2 * block_start", "1): lsbyte = hash_128 & 0xFF bytestring = bytestring +", "h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF k2 =", "def hash( key, seed = 0x0 ): ''' Implements 32bit", "( k1 << 31 | k1 >> 33 ) &", ">> 16 return h length = len( key ) nblocks", "2 * block_start + 4 ] << 32 | \\", "if tail_size >= 5: k2 ^= key[ tail_index + 4", "#finalization h1 ^= length h2 ^= length h1 = (", "fmix( h1 ) h2 = fmix( h2 ) h3 =", "& 0xFFFFFFFFFFFFFFFF h1 ^= k1 #finalization h1 ^= length h2", "h2 = ( h2 + h3 ) & 0xFFFFFFFF h2", "in range( 0, nblocks * 4, 4 ): # ???", "2 ] << 16 if tail_size >= 2: k1 ^=", "128bit murmur3 hash for x86. ''' def fmix( h ):", "2 * block_start + 7 ] << 56 | \\", "2 * block_start + 8 ] k1 = ( c1", "hash_bytes( key, seed = 0x0, x64arch = True ): '''", "hash_128 = hash_128 >> 8 return bytestring if __name__ ==", "0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF", "h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^= h >>", "is FAR from performant and if performance is anything that", "unsigned_val2 else: signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1", "15 ) & 0xFFFFFFFF # inlined ROTL32 h2 = (", "h3 ^= k3 h3 = ( h3 << 15 |", "fmix( h ): h ^= h >> 16 h =", "h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF", "128bit murmur3 hash. ''' def hash128_x64( key, seed ): '''", "a proper c-module is suggested! This module is written to", "h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1", "10: k2 ^= key[ tail_index + 9 ] << 8", "c2 ) & 0xFFFFFFFF h1 ^= k1 #finalization unsigned_val =", "key[ 2 * block_start + 3 ] << 24 |", "+ 11 ] << 24 if tail_size >= 11: k2", "& 0xFFFFFFFFFFFFFFFF h2 ^= k2 h2 = ( h2 <<", "128bit murmur3 hash. Returns a byte string. 
''' hash_128 =", "| h1 ) key = bytearray( xencode(key) ) if x64arch:", ") hash_128 = hash_128 >> 8 return bytestring if __name__", "0xFFFFFFFF h1 = ( h1 * 5 + 0x561ccd1b )", "0xFFFFFFFF # inlined ROTL32 k4 = ( k4 * c1", "range( 0, nblocks * 4, 4 ): # ??? big", "h1 = ( h1 * 5 + 0x561ccd1b ) &", "if tail_size >= 7: k2 ^= key[ tail_index + 6", "] << 8 if tail_size >= 5: k2 ^= key[", "+ h2 ) & 0xFFFFFFFF h1 = ( h1 *", "& 0xFFFFFFFF h2 ^= k2 if tail_size >= 4: k1", "k1 ) & 0xFFFFFFFF k1 = ( k1 << 15", "implementation of the murmur3 hash algorithm https://code.google.com/p/smhasher/wiki/MurmurHash3 This was written", "= ( h1 + h3 ) & 0xFFFFFFFF h4 =", "key ) nblocks = int( length / 4 ) h1", "= seed h3 = seed h4 = seed c1 =", "* c1 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size", "* block_start + 14 ] << 48 | \\ key[", "| k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64", "h2 << 64 | h1 ) def hash128_x86( key, seed", "16 ) h1 = seed h2 = seed h3 =", "h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2", "ROTL32 k1 = ( c2 * k1 ) & 0xFFFFFFFF", ">= 8: k2 ^= key[ tail_index + 7 ] <<", "0xFFFFFFFF return ( h4 << 96 | h3 << 64", "( c4 * k3 ) & 0xFFFFFFFF h3 ^= k3", "| \\ key[ block_start + 6 ] << 16 |", "is anything that is needed a proper c-module is suggested!", "= length & 3 if tail_size >= 3: k1 ^=", "64 | h2 << 32 | h1 ) key =", "^= k2 if tail_size >= 4: k1 ^= key[ tail_index", "( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^= k", "signed_val1 ), int( signed_val2 ) ) def hash_bytes( key, seed", "hereby disclaim copyright to this source code. pure python implementation", "tail_index + 6 ] << 48 if tail_size >= 6:", "] << 24 if tail_size >= 7: k2 ^= key[", "modules, and you only want a drop-in murmur3 implementation. 
As", "( h4 << 13 | h4 >> 19 ) &", "k2 ^= key[ tail_index + 10 ] << 16 if", "= nblocks * 16 k1 = 0 k2 = 0", "c3 * k2 ) & 0xFFFFFFFF h2 ^= k2 h2", "<< 32 | h1 ) key = bytearray( xencode(key) )", "-( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 ) unsigned_val2 = (", ") if unsigned_val & 0x80000000 == 0: return unsigned_val else:", "domain. The authors hereby disclaim copyright to this source code.", "| \\ key[ block_start + 1 ] << 8 |", "^= key[ tail_index + 1 ] << 8 if tail_size", "format as mmh3 python package found here for simple conversions:", "0xFF bytestring = bytestring + str( chr( lsbyte ) )", ") & 0xFFFFFFFFFFFFFFFF k2 = ( k2 << 33 |", "+ 11 ] << 24 | \\ key[ block_start +", "h2 ^= length h3 ^= length h4 ^= length h1", "= ( h1 << 19 | h1 >> 13 )", "8 ] k1 = ( c1 * k1 ) &", "16 k1 = 0 k2 = 0 tail_size = length", "0x0, x64arch = True ): ''' Implements 64bit murmur3 hash.", "k2 ^= key[ tail_index + 9 ] << 8 if", "+ 8 ] k4 = key[ block_start + 15 ]", "tail_size >= 15: k4 ^= key[ tail_index + 14 ]", "# inlined ROTL32 k4 = ( k4 * c1 )", "bytestring if __name__ == \"__main__\": import argparse parser = argparse.ArgumentParser(", "tail_size = length & 3 if tail_size >= 3: k1", "5 ] << 40 if tail_size >= 5: k1 ^=", "& 0xFFFFFFFF h1 ^= k1 #finalization h1 ^= length h2", "& 0xFFFFFFFF # inlined ROTL32 h1 = ( h1 +", "h2 = ( h2 << 17 | h2 >> 15", "k4 ^= key[ tail_index + 13 ] << 8 if", "k1 * c1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1", "tail_index + 3 ] << 24 if tail_size >= 3:", "seed = 0x0, x64arch = True ): ''' Implements 64bit", "h4 ) & 0xFFFFFFFF h1 = fmix( h1 ) h2", ") & 0xFFFFFFFFFFFFFFFF return ( h2 << 64 | h1", "# tail tail_index = nblocks * 4 k1 = 0", "& 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 = ( k1 *", "= '' for i in range(0, 16, 1): lsbyte =", "h2 << 32 | h1 ) key = bytearray( xencode(key)", "h2 ) h1 = ( h1 + h2 ) &", "authors hereby disclaim copyright to this source code. 
pure python", "( k3 * c4 ) & 0xFFFFFFFF h3 ^= k3", ") & 0xFFFFFFFFFFFFFFFF h2 = ( h2 * 5 +", "you only want a drop-in murmur3 implementation. As this is", "^= k1 #finalization unsigned_val = fmix( h1 ^ length )", "15 ) & 0xFFFFFFFF # inlined ROTL32 k3 = (", "^= key[ tail_index + 7 ] << 24 if tail_size", "0xFFFFFFFF # inlined ROTL32 k1 = ( k1 * c2", "Returns a tuple. ''' hash_128 = hash128( key, seed, x64arch", "k1 * c2 ) & 0xFFFFFFFF h1 ^= k1 #finalization", "0: signed_val2 = unsigned_val2 else: signed_val2 = -( (unsigned_val2 ^", "10 ] << 16 | \\ key[ block_start + 9", "hash128( key, seed, x64arch ) bytestring = '' for i", "* k1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1 <<", "0x0bcaa747 ) & 0xFFFFFFFF k3 = ( c3 * k3", "0x85ebca6b ) & 0xFFFFFFFF h ^= h >> 13 h", "key[ 2 * block_start + 4 ] << 32 |", "k1 ^= key[ tail_index + 1 ] << 8 if", "\\ key[ 2 * block_start + 2 ] << 16", "the times when you do not want to compile c-code", "( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 = (", ">= 6: k1 ^= key[ tail_index + 5 ] <<", "( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2 = (", "if tail_size >= 9: k3 ^= key[ tail_index + 8", "= int( length / 4 ) h1 = seed c1", "''' import sys as _sys if (_sys.version_info > (3, 0)):", "k2 h2 = ( h2 << 31 | h2 >>", "k3 ^= key[ tail_index + 9 ] << 8 if", ">= 9: k3 ^= key[ tail_index + 8 ] if", "0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2 = ( k2 * c1", "6 ] << 48 if tail_size >= 6: k1 ^=", "* k2 ) & 0xFFFFFFFFFFFFFFFF k2 = ( k2 <<", "want a drop-in murmur3 implementation. As this is purely python", "13 ) & 0xFFFFFFFF # inlined ROTL32 h1 = (", "def fmix( h ): h ^= h >> 16 h", "8 if tail_size >= 9: k3 ^= key[ tail_index +", "k1 ^= key[ tail_index + 5 ] << 40 if", "= 0 tail_size = length & 3 if tail_size >=", "length = len( key ) nblocks = int( length /", "k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF", ") & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 = ( k1", "for x86. 
''' def fmix( h ): h ^= h", "inlined ROTL64 h2 = ( h1 + h2 ) &", "| \\ key[ block_start + 9 ] << 8 |", "8 ] if tail_size > 8: k3 = ( k3", "^= k3 if tail_size >= 8: k2 ^= key[ tail_index", "inlined ROTL32 k2 = ( c3 * k2 ) &", "0xFFFFFFFF h2 = ( h1 + h2 ) & 0xFFFFFFFF", "16 ) h1 = seed h2 = seed c1 =", "parser.add_argument( 'strings', default = [], nargs='+') opts = parser.parse_args() for", "tail_index + 10 ] << 16 if tail_size >= 10:", "= ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF k1 =", "unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF if", ") & 0xFFFFFFFF # inlined ROTL32 k4 = ( c1", "else: def xencode(x): return x del _sys def hash( key,", "<< 32 if tail_size >= 4: k1 ^= key[ tail_index", "k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF k1", "= ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF k2 =", "| \\ key[ block_start + 0 ] k2 = key[", "32 if tail_size >= 4: k1 ^= key[ tail_index +", ") h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF", "| \\ key[ 2 * block_start + 12 ] <<", ") & 0xFFFFFFFF h3 ^= k3 if tail_size >= 8:", "+ 8 ] if tail_size > 8: k2 = (", "\\ key[ block_start + 5 ] << 8 | \\", "* block_start + 13 ] << 40 | \\ key[", "to hash\"' ) parser.add_argument( '--seed', type = int, default =", "len( key ) nblocks = int( length / 4 )", "tail_index + 13 ] << 8 if tail_size >= 13:", "+ h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h2 *", "<< 8 | \\ key[ 2 * block_start + 0", "#finalization h1 ^= length h2 ^= length h3 ^= length", ">> 33 k = ( k * 0xff51afd7ed558ccd ) &", "16 return h length = len( key ) nblocks =", "13: k2 ^= key[ tail_index + 12 ] << 32", "in opts.strings: sys.stdout.write( '\"%s\" = 0x%08X\\n' % ( str_to_hash, hash(", "xencode(x): return x del _sys def hash( key, seed =", "| h1 ) def hash128_x86( key, seed ): ''' Implements", ") key = bytearray( xencode(key) ) if x64arch: return hash128_x64(", "h1 + h4 ) & 0xFFFFFFFF return ( h4 <<", "inlined ROTL32 h1 = ( h1 * 5 + 0xe6546b64", "<< 16 if tail_size >= 10: k2 ^= key[ tail_index", "is written to have the same format as mmh3 python", ") def 
hash64( key, seed = 0x0, x64arch = True", "0xFFFFFFFFFFFFFFFF h2 ^= k2 h2 = ( h2 << 31", "= bytearray( xencode(key) ) def fmix( h ): h ^=", "block_start in range( 0, nblocks * 4, 4 ): #", "if unsigned_val2 & 0x8000000000000000 == 0: signed_val2 = unsigned_val2 else:", "k2 ^= key[ tail_index + 7 ] << 24 if", "k1 = ( k1 * c1 ) & 0xFFFFFFFF k1", "tail_index + 0 ] if tail_size > 0: k1 =", "<< 18 | k4 >> 14 ) & 0xFFFFFFFF #", "+ h2 ) & 0xFFFFFFFF h1 = ( h1 +", "+ 2 ] << 16 | \\ key[ 2 *", "ROTL64 k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF", "= hash_128 >> 8 return bytestring if __name__ == \"__main__\":", "= ( k1 * c2 ) & 0xFFFFFFFF h1 ^=", "tail_size >= 9: k2 ^= key[ tail_index + 8 ]", "= 0x38b34ae5 c4 = 0xa1e38b93 #body for block_start in range(", "c4 * k3 ) & 0xFFFFFFFF h3 ^= k3 h3", "24 | \\ key[ block_start + 2 ] << 16", "+ h3 ) & 0xFFFFFFFF h4 = ( h1 +", "+ 15 ] << 56 | \\ key[ 2 *", "0xFFFFFFFFFFFFFFFF k2 = ( k2 << 33 | k2 >>", "7: k2 ^= key[ tail_index + 6 ] << 16", "5 + 0xe6546b64 ) & 0xFFFFFFFF # tail tail_index =", "7: k1 ^= key[ tail_index + 6 ] << 48", "nblocks = int( length / 4 ) h1 = seed", "ROTL32 h1 = ( h1 * 5 + 0xe6546b64 )", "seed h2 = seed h3 = seed h4 = seed", "key[ block_start + 9 ] << 8 | \\ key[", "if tail_size >= 10: k3 ^= key[ tail_index + 9", "block_start + 4 ] << 32 | \\ key[ 2", "#body for block_start in range( 0, nblocks * 8, 8", "( k3 * c3 ) & 0xFFFFFFFF k3 = (", "<< 8 if tail_size >= 5: k2 ^= key[ tail_index", "0xFFFFFFFFFFFFFFFF # inlined ROTL64 h2 = ( h1 + h2", "c2 * k2 ) & 0xFFFFFFFFFFFFFFFF k2 = ( k2", "* k1 ) & 0xFFFFFFFFFFFFFFFF h1 ^= k1 h1 =", "8 if tail_size >= 5: k2 ^= key[ tail_index +", "here for simple conversions: https://pypi.python.org/pypi/mmh3/2.3.1 ''' import sys as _sys", ") return ( int( signed_val1 ), int( signed_val2 ) )", "24 | \\ key[ block_start + 14 ] << 16", "unsigned_val else: return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )", "key[ tail_index + 5 ] << 40 if tail_size >=", ") ) def hash_bytes( key, seed = 0x0, x64arch 
=", "def hash128_x86( key, seed ): ''' Implements 128bit murmur3 hash", "bytearray( xencode(key) ) if x64arch: return hash128_x64( key, seed )", "h3 = ( h3 + h4 ) & 0xFFFFFFFF h3", "= 0x0, x64arch = True ): ''' Implements 128bit murmur3", "block_start + 9 ] << 8 | \\ key[ block_start", "c3 ) & 0xFFFFFFFF k3 = ( k3 << 17", "] << 8 if tail_size >= 1: k1 ^= key[", "if tail_size > 8: k3 = ( k3 * c3", "return unsigned_val else: return -( (unsigned_val ^ 0xFFFFFFFF) + 1", "bytestring = '' for i in range(0, 16, 1): lsbyte", "( h2 << 17 | h2 >> 15 ) &", "b, c ): return list(range( a, b, c)) def xencode(x):", "h1 = ( h1 * 5 + 0x52dce729 ) &", "key[ tail_index + 8 ] if tail_size > 8: k3", "key[ 2 * block_start + 9 ] << 8 |", "hash. ''' key = bytearray( xencode(key) ) def fmix( h", "+ 3 ] << 24 | \\ key[ block_start +", "if tail_size >= 5: k1 ^= key[ tail_index + 4", "0xFFFFFFFFFFFFFFFF return ( h2 << 64 | h1 ) def", "+ 9 ] << 8 | \\ key[ 2 *", "xencode(key) ) if x64arch: return hash128_x64( key, seed ) else:", "c2 * k1 ) & 0xFFFFFFFFFFFFFFFF h1 ^= k1 h1", "( h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^= h", "+ h2 ) & 0xFFFFFFFFFFFFFFFF h1 = ( h1 *", "4 ] if tail_size > 4: k2 = ( k2", "c2 ) & 0xFFFFFFFF h1 ^= k1 #finalization h1 ^=", "k3 = ( k3 * c4 ) & 0xFFFFFFFF h3", "and install modules, and you only want a drop-in murmur3", "\\ key[ 2 * block_start + 8 ] k1 =", "\\ key[ 2 * block_start + 1 ] << 8", "''' Implements 128bit murmur3 hash. Returns a byte string. 
'''", "48 | \\ key[ 2 * block_start + 5 ]", "0xFFFFFFFF k3 = ( k3 << 17 | k3 >>", "8 ] if tail_size > 8: k2 = ( k2", "h1 ) key = bytearray( xencode(key) ) if x64arch: return", ") & 0xFFFFFFFF k2 = ( k2 << 16 |", "( k4 * c4 ) & 0xFFFFFFFF k4 = (", "0xFFFFFFFFFFFFFFFF) + 1 ) unsigned_val2 = ( hash_128 >> 64", ") h1 = seed h2 = seed c1 = 0x87c37b91114253d5", "k1 = 0 k2 = 0 k3 = 0 k4", "+ 13 ] << 8 if tail_size >= 13: k4", "] << 48 if tail_size >= 6: k1 ^= key[", "c1 * k1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1", "^= k1 #finalization h1 ^= length h2 ^= length h1", "+ 10 ] << 16 | \\ key[ block_start +", "hash. ''' def hash128_x64( key, seed ): ''' Implements 128bit", "( c4 * k4 ) & 0xFFFFFFFF k4 = (", "c2 = 0x1b873593 # body for block_start in range( 0,", "3 if tail_size >= 3: k1 ^= key[ tail_index +", "( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF if unsigned_val2 &", "= 0x%08X\\n' % ( str_to_hash, hash( str_to_hash ) ) )", ") & 0xFFFFFFFF h3 = ( h3 * 5 +", "(_sys.version_info > (3, 0)): def xrange( a, b, c ):", "= parser.parse_args() for str_to_hash in opts.strings: sys.stdout.write( '\"%s\" = 0x%08X\\n'", "for block_start in range( 0, nblocks * 4, 4 ):", "0xFFFFFFFF k1 = ( k1 << 15 | k1 >>", "key[ tail_index + 1 ] << 8 if tail_size >=", "tail_size > 0: k1 = ( k1 * c1 )", "(unsigned_val ^ 0xFFFFFFFF) + 1 ) def hash128( key, seed", "^= key[ tail_index + 10 ] << 16 if tail_size", "+ h4 ) & 0xFFFFFFFF h4 = ( h4 *", "+ 8 ] k1 = ( c1 * k1 )", "+ 11 ] << 24 if tail_size >= 11: k3", "& 0xFFFFFFFFFFFFFFFF if unsigned_val2 & 0x8000000000000000 == 0: signed_val2 =", ">= 15: k4 ^= key[ tail_index + 14 ] <<", "2: k1 ^= key[ tail_index + 1 ] << 8", "k3 ) & 0xFFFFFFFF h3 ^= k3 h3 = (", "this source code. pure python implementation of the murmur3 hash", "length / 16 ) h1 = seed h2 = seed", "+ 9 ] << 8 if tail_size >= 9: k2", "5 + 0x561ccd1b ) & 0xFFFFFFFF k2 = ( c2", "+ 9 ] << 8 if tail_size >= 9: k3", "implementation. 
'''
pymmh3 was written by <NAME> and enhanced by <NAME>, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.

pure python implementation of the murmur3 hash algorithm

https://code.google.com/p/smhasher/wiki/MurmurHash3

This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.

As this is purely python it is FAR from performant and if performance is anything that is
needed a proper c-module is suggested!

This module is written to have the same format as mmh3 python package found here for simple
conversions:

https://pypi.python.org/pypi/mmh3/2.3.1
'''

import sys as _sys

if (_sys.version_info > (3, 0)):
    def xrange( a, b, c ):
        # Python 3 shim kept for backward compatibility with the original module's API.
        # NOTE(review): this file's own loops use plain range(); xrange is retained only
        # because external callers may import it.
        return list(range( a, b, c))

    def xencode(x):
        # Normalise str input to bytes; bytes/bytearray pass through untouched.
        if isinstance(x, bytes) or isinstance(x, bytearray):
            return x
        else:
            return x.encode()
else:
    def xencode(x):
        # Python 2: str is already a byte string, nothing to do.
        return x
del _sys


def hash( key, seed = 0x0 ):
    ''' Implements 32bit murmur3 hash.

    Args:
        key: str / bytes / bytearray to hash.
        seed: 32 bit integer seed (default 0x0).

    Returns:
        Signed 32 bit integer (mmh3-compatible two's-complement convention).
    '''

    key = bytearray( xencode(key) )

    def fmix( h ):
        # Final avalanche mix: spreads the influence of every input bit.
        h ^= h >> 16
        h  = ( h * 0x85ebca6b ) & 0xFFFFFFFF
        h ^= h >> 13
        h  = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
        h ^= h >> 16
        return h

    length = len( key )
    # Floor division instead of int(length / 4): avoids float precision loss
    # for pathologically large inputs, identical result otherwise.
    nblocks = length // 4

    h1 = seed

    c1 = 0xcc9e2d51
    c2 = 0x1b873593

    # body: consume the input one little-endian uint32 at a time
    for block_start in range( 0, nblocks * 4, 4 ):
        # ??? big endian?
        k1 = key[ block_start + 3 ] << 24 | \
             key[ block_start + 2 ] << 16 | \
             key[ block_start + 1 ] <<  8 | \
             key[ block_start + 0 ]

        k1 = ( c1 * k1 ) & 0xFFFFFFFF
        k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF  # inlined ROTL32
        k1 = ( c2 * k1 ) & 0xFFFFFFFF

        h1 ^= k1
        h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF  # inlined ROTL32
        h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF

    # tail: 0-3 trailing bytes that did not fill a whole block
    tail_index = nblocks * 4
    k1 = 0
    tail_size = length & 3

    if tail_size >= 3:
        k1 ^= key[ tail_index + 2 ] << 16
    if tail_size >= 2:
        k1 ^= key[ tail_index + 1 ] << 8
    if tail_size >= 1:
        k1 ^= key[ tail_index + 0 ]

    if tail_size > 0:
        k1 = ( k1 * c1 ) & 0xFFFFFFFF
        k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF  # inlined ROTL32
        k1 = ( k1 * c2 ) & 0xFFFFFFFF
        h1 ^= k1

    # finalization: fold in the length, then map to mmh3's signed convention
    unsigned_val = fmix( h1 ^ length )
    if unsigned_val & 0x80000000 == 0:
        return unsigned_val
    else:
        return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )


def hash128( key, seed = 0x0, x64arch = True ):
    ''' Implements 128bit murmur3 hash.

    Args:
        key: str / bytes / bytearray to hash.
        seed: integer seed (default 0x0).
        x64arch: True for the x64 variant, False for the x86 variant
            (the two variants produce different digests by design).

    Returns:
        Unsigned 128 bit integer.
    '''

    def hash128_x64( key, seed ):
        ''' Implements 128bit murmur3 hash for x64. '''

        def fmix( k ):
            # 64 bit avalanche finalizer.
            k ^= k >> 33
            k  = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
            k ^= k >> 33
            k  = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
            k ^= k >> 33
            return k

        length = len( key )
        nblocks = length // 16  # complete 16-byte blocks

        h1 = seed
        h2 = seed

        c1 = 0x87c37b91114253d5
        c2 = 0x4cf5ad432745937f

        # body: two little-endian uint64 lanes per 16-byte block
        for block_start in range( 0, nblocks * 8, 8 ):
            # ??? big endian?
            k1 = key[ 2 * block_start + 7 ] << 56 | \
                 key[ 2 * block_start + 6 ] << 48 | \
                 key[ 2 * block_start + 5 ] << 40 | \
                 key[ 2 * block_start + 4 ] << 32 | \
                 key[ 2 * block_start + 3 ] << 24 | \
                 key[ 2 * block_start + 2 ] << 16 | \
                 key[ 2 * block_start + 1 ] <<  8 | \
                 key[ 2 * block_start + 0 ]

            k2 = key[ 2 * block_start + 15 ] << 56 | \
                 key[ 2 * block_start + 14 ] << 48 | \
                 key[ 2 * block_start + 13 ] << 40 | \
                 key[ 2 * block_start + 12 ] << 32 | \
                 key[ 2 * block_start + 11 ] << 24 | \
                 key[ 2 * block_start + 10 ] << 16 | \
                 key[ 2 * block_start +  9 ] <<  8 | \
                 key[ 2 * block_start +  8 ]

            k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
            k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
            h1 ^= k1

            h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
            h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF

            k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
            k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
            h2 ^= k2

            h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
            h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF

        # tail: 0-15 trailing bytes
        tail_index = nblocks * 16
        k1 = 0
        k2 = 0
        tail_size = length & 15

        if tail_size >= 15:
            k2 ^= key[ tail_index + 14 ] << 48
        if tail_size >= 14:
            k2 ^= key[ tail_index + 13 ] << 40
        if tail_size >= 13:
            k2 ^= key[ tail_index + 12 ] << 32
        if tail_size >= 12:
            k2 ^= key[ tail_index + 11 ] << 24
        if tail_size >= 11:
            k2 ^= key[ tail_index + 10 ] << 16
        if tail_size >= 10:
            k2 ^= key[ tail_index +  9 ] << 8
        if tail_size >=  9:
            k2 ^= key[ tail_index +  8 ]

        if tail_size > 8:
            k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
            k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
            h2 ^= k2

        if tail_size >= 8:
            k1 ^= key[ tail_index + 7 ] << 56
        if tail_size >= 7:
            k1 ^= key[ tail_index + 6 ] << 48
        if tail_size >= 6:
            k1 ^= key[ tail_index + 5 ] << 40
        if tail_size >= 5:
            k1 ^= key[ tail_index + 4 ] << 32
        if tail_size >= 4:
            k1 ^= key[ tail_index + 3 ] << 24
        if tail_size >= 3:
            k1 ^= key[ tail_index + 2 ] << 16
        if tail_size >= 2:
            k1 ^= key[ tail_index + 1 ] << 8
        if tail_size >= 1:
            k1 ^= key[ tail_index + 0 ]

        if tail_size > 0:
            k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
            k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
            k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
            h1 ^= k1

        # finalization
        h1 ^= length
        h2 ^= length

        h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
        h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF

        h1 = fmix( h1 )
        h2 = fmix( h2 )

        h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
        h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF

        return ( h2 << 64 | h1 )

    def hash128_x86( key, seed ):
        ''' Implements 128bit murmur3 hash for x86. '''

        def fmix( h ):
            # 32 bit avalanche finalizer.
            h ^= h >> 16
            h  = ( h * 0x85ebca6b ) & 0xFFFFFFFF
            h ^= h >> 13
            h  = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
            h ^= h >> 16
            return h

        length = len( key )
        nblocks = length // 16  # complete 16-byte blocks

        h1 = seed
        h2 = seed
        h3 = seed
        h4 = seed

        c1 = 0x239b961b
        c2 = 0xab0e9789
        c3 = 0x38b34ae5
        c4 = 0xa1e38b93

        # body: four little-endian uint32 lanes per 16-byte block
        for block_start in range( 0, nblocks * 16, 16 ):
            k1 = key[ block_start +  3 ] << 24 | \
                 key[ block_start +  2 ] << 16 | \
                 key[ block_start +  1 ] <<  8 | \
                 key[ block_start +  0 ]

            k2 = key[ block_start +  7 ] << 24 | \
                 key[ block_start +  6 ] << 16 | \
                 key[ block_start +  5 ] <<  8 | \
                 key[ block_start +  4 ]

            k3 = key[ block_start + 11 ] << 24 | \
                 key[ block_start + 10 ] << 16 | \
                 key[ block_start +  9 ] <<  8 | \
                 key[ block_start +  8 ]

            k4 = key[ block_start + 15 ] << 24 | \
                 key[ block_start + 14 ] << 16 | \
                 key[ block_start + 13 ] <<  8 | \
                 key[ block_start + 12 ]

            k1 = ( c1 * k1 ) & 0xFFFFFFFF
            k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF  # inlined ROTL32
            k1 = ( c2 * k1 ) & 0xFFFFFFFF
            h1 ^= k1

            h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF  # inlined ROTL32
            h1 = ( h1 + h2 ) & 0xFFFFFFFF
            h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF

            k2 = ( c2 * k2 ) & 0xFFFFFFFF
            k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF  # inlined ROTL32
            k2 = ( c3 * k2 ) & 0xFFFFFFFF
            h2 ^= k2

            h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF  # inlined ROTL32
            h2 = ( h2 + h3 ) & 0xFFFFFFFF
            h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF

            k3 = ( c3 * k3 ) & 0xFFFFFFFF
            k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF  # inlined ROTL32
            k3 = ( c4 * k3 ) & 0xFFFFFFFF
            h3 ^= k3

            h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF  # inlined ROTL32
            h3 = ( h3 + h4 ) & 0xFFFFFFFF
            h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF

            k4 = ( c4 * k4 ) & 0xFFFFFFFF
            k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF  # inlined ROTL32
            k4 = ( c1 * k4 ) & 0xFFFFFFFF
            h4 ^= k4

            h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF  # inlined ROTL32
            h4 = ( h1 + h4 ) & 0xFFFFFFFF
            h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF

        # tail: 0-15 trailing bytes, highest lane first
        tail_index = nblocks * 16
        k1 = 0
        k2 = 0
        k3 = 0
        k4 = 0
        tail_size = length & 15

        if tail_size >= 15:
            k4 ^= key[ tail_index + 14 ] << 16
        if tail_size >= 14:
            k4 ^= key[ tail_index + 13 ] << 8
        if tail_size >= 13:
            k4 ^= key[ tail_index + 12 ]

        if tail_size > 12:
            k4 = ( k4 * c4 ) & 0xFFFFFFFF
            k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF  # inlined ROTL32
            k4 = ( k4 * c1 ) & 0xFFFFFFFF
            h4 ^= k4

        if tail_size >= 12:
            k3 ^= key[ tail_index + 11 ] << 24
        if tail_size >= 11:
            k3 ^= key[ tail_index + 10 ] << 16
        if tail_size >= 10:
            k3 ^= key[ tail_index +  9 ] << 8
        if tail_size >=  9:
            k3 ^= key[ tail_index +  8 ]

        if tail_size > 8:
            k3 = ( k3 * c3 ) & 0xFFFFFFFF
            k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF  # inlined ROTL32
            k3 = ( k3 * c4 ) & 0xFFFFFFFF
            h3 ^= k3

        if tail_size >= 8:
            k2 ^= key[ tail_index + 7 ] << 24
        if tail_size >= 7:
            k2 ^= key[ tail_index + 6 ] << 16
        if tail_size >= 6:
            k2 ^= key[ tail_index + 5 ] << 8
        if tail_size >= 5:
            k2 ^= key[ tail_index + 4 ]

        if tail_size > 4:
            k2 = ( k2 * c2 ) & 0xFFFFFFFF
            k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF  # inlined ROTL32
            k2 = ( k2 * c3 ) & 0xFFFFFFFF
            h2 ^= k2

        if tail_size >= 4:
            k1 ^= key[ tail_index + 3 ] << 24
        if tail_size >= 3:
            k1 ^= key[ tail_index + 2 ] << 16
        if tail_size >= 2:
            k1 ^= key[ tail_index + 1 ] << 8
        if tail_size >= 1:
            k1 ^= key[ tail_index + 0 ]

        if tail_size > 0:
            k1 = ( k1 * c1 ) & 0xFFFFFFFF
            k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF  # inlined ROTL32
            k1 = ( k1 * c2 ) & 0xFFFFFFFF
            h1 ^= k1

        # finalization
        h1 ^= length
        h2 ^= length
        h3 ^= length
        h4 ^= length

        h1 = ( h1 + h2 ) & 0xFFFFFFFF
        h1 = ( h1 + h3 ) & 0xFFFFFFFF
        h1 = ( h1 + h4 ) & 0xFFFFFFFF
        h2 = ( h1 + h2 ) & 0xFFFFFFFF
        h3 = ( h1 + h3 ) & 0xFFFFFFFF
        h4 = ( h1 + h4 ) & 0xFFFFFFFF

        h1 = fmix( h1 )
        h2 = fmix( h2 )
        h3 = fmix( h3 )
        h4 = fmix( h4 )

        h1 = ( h1 + h2 ) & 0xFFFFFFFF
        h1 = ( h1 + h3 ) & 0xFFFFFFFF
        h1 = ( h1 + h4 ) & 0xFFFFFFFF
        h2 = ( h1 + h2 ) & 0xFFFFFFFF
        h3 = ( h1 + h3 ) & 0xFFFFFFFF
        h4 = ( h1 + h4 ) & 0xFFFFFFFF

        return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )

    key = bytearray( xencode(key) )

    if x64arch:
        return hash128_x64( key, seed )
    else:
        return hash128_x86( key, seed )


def hash64( key, seed = 0x0, x64arch = True ):
    ''' Implements 64bit murmur3 hash. Returns a tuple.

    The 128 bit digest is split into two signed 64 bit halves
    (low half first), matching the mmh3 package's hash64 format.
    '''

    hash_128 = hash128( key, seed, x64arch )

    unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
    if unsigned_val1 & 0x8000000000000000 == 0:
        signed_val1 = unsigned_val1
    else:
        signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )

    unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
    if unsigned_val2 & 0x8000000000000000 == 0:
        signed_val2 = unsigned_val2
    else:
        signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )

    return ( int( signed_val1 ), int( signed_val2 ) )


def hash_bytes( key, seed = 0x0, x64arch = True ):
    ''' Implements 128bit murmur3 hash. Returns a byte string.

    NOTE(review): on Python 3 this returns a 16-character str (chr of each
    byte), not a bytes object — kept as-is for interface compatibility.
    '''

    hash_128 = hash128( key, seed, x64arch )

    bytestring = ''

    # Emit the digest least-significant byte first.
    for i in range(0, 16, 1):
        lsbyte = hash_128 & 0xFF
        bytestring = bytestring + chr( lsbyte )
        hash_128 = hash_128 >> 8

    return bytestring


if __name__ == "__main__":
    import argparse
    import sys  # bug fix: module-level 'sys' was imported as _sys and deleted,
                # so sys.stdout below raised NameError when run as a script

    parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
    parser.add_argument( '--seed', type = int, default = 0 )
    parser.add_argument( 'strings', default = [], nargs='+')

    opts = parser.parse_args()

    for str_to_hash in opts.strings:
        sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
The authors hereby disclaim", "h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h2 * 5", "= 0x239b961b c2 = 0xab0e9789 c3 = 0x38b34ae5 c4 =", "h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32 h1", "https://pypi.python.org/pypi/mmh3/2.3.1 ''' import sys as _sys if (_sys.version_info > (3,", "0x96cd1c35 ) & 0xFFFFFFFF k4 = ( c4 * k4", "fmix( h2 ) h3 = fmix( h3 ) h4 =", "if tail_size >= 1: k1 ^= key[ tail_index + 0", "k2 h2 = ( h2 << 17 | h2 >>", "disclaim copyright to this source code. pure python implementation of", "k3 ^= key[ tail_index + 11 ] << 24 if", ">= 10: k3 ^= key[ tail_index + 9 ] <<", "if tail_size > 0: k1 = ( k1 * c1", "| k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32", "is needed a proper c-module is suggested! This module is", "37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h1 = (", "1 ) def hash128( key, seed = 0x0, x64arch =", "k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32 k1", "0xFFFFFFFF h4 ^= k4 h4 = ( h4 << 13", "| h3 << 64 | h2 << 32 | h1", "hash_128 = hash128( key, seed, x64arch ) bytestring = ''", "c-code and install modules, and you only want a drop-in", "ROTL64 h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF", "* 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 = (", "and is placed in the public domain. 
The authors hereby", "0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 k", "0xFFFFFFFF # inlined ROTL32 h3 = ( h3 + h4", "(unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 ) unsigned_val2 = ( hash_128", "key, seed, x64arch ) bytestring = '' for i in", "= ( c4 * k3 ) & 0xFFFFFFFF h3 ^=", "k1 = ( k1 << 31 | k1 >> 33", ">> 16 h = ( h * 0x85ebca6b ) &", "<< 48 | \\ key[ 2 * block_start + 13", "block_start + 8 ] k1 = ( c1 * k1", "( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2", "k1 ^= key[ tail_index + 2 ] << 16 if", "h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF", "if tail_size >= 13: k4 ^= key[ tail_index + 12", "# inlined ROTL32 h1 = ( h1 + h2 )", ") def hash128_x86( key, seed ): ''' Implements 128bit murmur3", ") & 0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size >= 8:", "k1 = key[ block_start + 3 ] << 24 |", "+ 4 ] k3 = key[ block_start + 11 ]", "bytearray( xencode(key) ) def fmix( h ): h ^= h", "key[ tail_index + 4 ] << 32 if tail_size >=", "( h1 + h2 ) & 0xFFFFFFFF h3 = (", "16 if tail_size >= 10: k3 ^= key[ tail_index +", "h2 ) & 0xFFFFFFFFFFFFFFFF return ( h2 << 64 |", "hash128_x86( key, seed ): ''' Implements 128bit murmur3 hash for", "/ 16 ) h1 = seed h2 = seed h3", "purely python it is FAR from performant and if performance", "17 ) & 0xFFFFFFFF # inlined ROTL32 k1 = (", ">= 14: k4 ^= key[ tail_index + 13 ] <<", "k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32 k4", ">= 6: k2 ^= key[ tail_index + 5 ] <<", "| \\ key[ 2 * block_start + 9 ] <<", "int, default = 0 ) parser.add_argument( 'strings', default = [],", "9: k2 ^= key[ tail_index + 8 ] if tail_size", "key[ block_start + 7 ] << 24 | \\ key[", "key[ 2 * block_start + 13 ] << 40 |", "0xFFFFFFFF # inlined ROTL32 h2 = ( h2 + h3", "h1 ^= k1 h1 = ( h1 << 27 |", "0xFFFFFFFF) + 1 ) def hash128( key, seed = 0x0,", "& 0xFFFFFFFF h ^= h >> 13 h = (", "''' Implements 128bit murmur3 hash for x86. 
''' def fmix(", "seed c1 = 0xcc9e2d51 c2 = 0x1b873593 # body for", "key[ block_start + 10 ] << 16 | \\ key[", "parser.parse_args() for str_to_hash in opts.strings: sys.stdout.write( '\"%s\" = 0x%08X\\n' %", "k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF k1", "| \\ key[ block_start + 10 ] << 16 |", "0xFFFFFFFFFFFFFFFF h1 = fmix( h1 ) h2 = fmix( h2", "if tail_size >= 12: k3 ^= key[ tail_index + 11", "tail_size >= 12: k3 ^= key[ tail_index + 11 ]", "| k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32", "0x80000000 == 0: return unsigned_val else: return -( (unsigned_val ^", "5 ] << 8 if tail_size >= 5: k2 ^=", "18 | k4 >> 14 ) & 0xFFFFFFFF # inlined", "of the murmur3 hash algorithm https://code.google.com/p/smhasher/wiki/MurmurHash3 This was written for", "h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h1", "k3 ^= key[ tail_index + 10 ] << 16 if", "key[ tail_index + 11 ] << 24 if tail_size >=", "''' hash_128 = hash128( key, seed, x64arch ) bytestring =", "FAR from performant and if performance is anything that is", ">= 7: k1 ^= key[ tail_index + 6 ] <<", "As this is purely python it is FAR from performant", "return ( h4 << 96 | h3 << 64 |", "return h length = len( key ) nblocks = int(", "key[ block_start + 0 ] k1 = ( c1 *", "0xFFFFFFFFFFFFFFFF #tail tail_index = nblocks * 16 k1 = 0", ">= 13: k4 ^= key[ tail_index + 12 ] if", "len( key ) nblocks = int( length / 16 )", "default = 0 ) parser.add_argument( 'strings', default = [], nargs='+')", "''' Implements 128bit murmur3 hash for x64. ''' def fmix(", "key[ 2 * block_start + 0 ] k2 = key[", "0xFFFFFFFF h ^= h >> 16 return h length =", "Implements 64bit murmur3 hash. Returns a tuple. 
''' hash_128 =", "x.encode() else: def xencode(x): return x del _sys def hash(", ">> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 =", "] << 16 if tail_size >= 10: k2 ^= key[", "k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF", "h4 ) & 0xFFFFFFFF h4 = ( h4 * 5", "<< 16 if tail_size >= 10: k3 ^= key[ tail_index", "k2 ^= key[ tail_index + 4 ] if tail_size >", "= fmix( h3 ) h4 = fmix( h4 ) h1", "* block_start + 2 ] << 16 | \\ key[", "0 k2 = 0 tail_size = length & 15 if", "<< 56 | \\ key[ 2 * block_start + 6", "16, 1): lsbyte = hash_128 & 0xFF bytestring = bytestring", ") & 0xFFFFFFFF # inlined ROTL32 k3 = ( c4", "if tail_size >= 6: k2 ^= key[ tail_index + 5", "argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] \"string to hash\"' ) parser.add_argument( '--seed',", "key[ 2 * block_start + 8 ] k1 = (", "\\ key[ 2 * block_start + 5 ] << 40", "return hash128_x86( key, seed ) def hash64( key, seed =", "and if performance is anything that is needed a proper", "0xFFFFFFFFFFFFFFFF h1 = ( h1 * 5 + 0x52dce729 )", "hash128( key, seed, x64arch ) unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF", "| \\ key[ block_start + 2 ] << 16 |", "= ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^=", "= ( h1 + h2 ) & 0xFFFFFFFF h1 =", "unsigned_val2 & 0x8000000000000000 == 0: signed_val2 = unsigned_val2 else: signed_val2", ") ) hash_128 = hash_128 >> 8 return bytestring if", "k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF k ^= k >>", "( h1 + h2 ) & 0xFFFFFFFF h1 = (", "performance is anything that is needed a proper c-module is", "# ??? big endian? k1 = key[ 2 * block_start", ") & 0xFFFFFFFF h4 = ( h4 * 5 +", "k >> 33 k = ( k * 0xff51afd7ed558ccd )", "\\ key[ 2 * block_start + 0 ] k2 =", "inlined ROTL32 k4 = ( c1 * k4 ) &", "Implements 128bit murmur3 hash. ''' def hash128_x64( key, seed ):", "| \\ key[ 2 * block_start + 8 ] k1", "^= h >> 16 h = ( h * 0x85ebca6b", "block_start in range( 0, nblocks * 8, 8 ): #", "k ^= k >> 33 k = ( k *", "a tuple. 
''' hash_128 = hash128( key, seed, x64arch )", "<< 64 | h2 << 32 | h1 ) key", ">> 33 k = ( k * 0xc4ceb9fe1a85ec53 ) &", "= ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF if unsigned_val2", "14 ] << 16 | \\ key[ block_start + 13", "# inlined ROTL64 h1 = ( h1 + h2 )", "found here for simple conversions: https://pypi.python.org/pypi/mmh3/2.3.1 ''' import sys as", "0xFFFFFFFF # inlined ROTL32 k3 = ( c4 * k3", "h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF", "h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 =", "ROTL32 h3 = ( h3 + h4 ) & 0xFFFFFFFF", "__name__ == \"__main__\": import argparse parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur", "c1 * k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 h2", "c1 = 0x87c37b91114253d5 c2 = 0x4cf5ad432745937f #body for block_start in", "0x239b961b c2 = 0xab0e9789 c3 = 0x38b34ae5 c4 = 0xa1e38b93", "if tail_size >= 3: k1 ^= key[ tail_index + 2", "inlined ROTL32 k1 = ( k1 * c2 ) &", "+ 7 ] << 24 if tail_size >= 7: k2", "| k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32", "4 ] << 32 | \\ key[ 2 * block_start", "??? big endian? k1 = key[ block_start + 3 ]", "( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF #tail", "( k1 * c1 ) & 0xFFFFFFFF k1 = (", "to compile c-code and install modules, and you only want", "signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 ) return", ") h1 = seed h2 = seed h3 = seed", "9 ] << 8 if tail_size >= 9: k3 ^=", "# inlined ROTL32 h1 = ( h1 * 5 +", "0xFFFFFFFF h4 ^= k4 if tail_size >= 12: k3 ^=", "only want a drop-in murmur3 implementation. As this is purely", "( h1 + h4 ) & 0xFFFFFFFF h1 = fmix(", "^= key[ tail_index + 7 ] << 56 if tail_size", "_sys def hash( key, seed = 0x0 ): ''' Implements", "c2 = 0xab0e9789 c3 = 0x38b34ae5 c4 = 0xa1e38b93 #body", "h2 ) & 0xFFFFFFFFFFFFFFFF h1 = fmix( h1 ) h2", ">= 11: k2 ^= key[ tail_index + 10 ] <<", "k3 = ( k3 * c3 ) & 0xFFFFFFFF k3", "Implements 128bit murmur3 hash for x86. 
''' def fmix( h", "h4 = ( h1 + h4 ) & 0xFFFFFFFF return", "0xFFFFFFFF h1 = ( h1 + h3 ) & 0xFFFFFFFF", ") unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF", "* c1 ) & 0xFFFFFFFFFFFFFFFF k1 = ( k1 <<", "0x1b873593 # body for block_start in range( 0, nblocks *", "if tail_size >= 11: k3 ^= key[ tail_index + 10", "( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1 = fmix(", "* c2 ) & 0xFFFFFFFF h1 ^= k1 #finalization unsigned_val", "a byte string. ''' hash_128 = hash128( key, seed, x64arch", "''' Implements 64bit murmur3 hash. Returns a tuple. ''' hash_128", "tail_size >= 10: k3 ^= key[ tail_index + 9 ]", "( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF k2 = (", "= 0 k3 = 0 k4 = 0 tail_size =", "= 0 tail_size = length & 15 if tail_size >=", "seed = 0x0 ): ''' Implements 32bit murmur3 hash. '''", "c4 * k4 ) & 0xFFFFFFFF k4 = ( k4", "14 ) & 0xFFFFFFFF # inlined ROTL32 k4 = (", "if tail_size >= 12: k2 ^= key[ tail_index + 11", "0, nblocks * 4, 4 ): # ??? big endian?", "if tail_size >= 2: k1 ^= key[ tail_index + 1", "2 * block_start + 6 ] << 48 | \\", "k ): k ^= k >> 33 k = (", "0xFFFFFFFFFFFFFFFF # inlined ROTL64 k1 = ( k1 * c2", "& 0xFFFFFFFF # inlined ROTL32 h1 = ( h1 *", "+ 6 ] << 48 if tail_size >= 6: k1", "0xFFFFFFFFFFFFFFFF h1 ^= k1 h1 = ( h1 << 27", "k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32 k2", ") parser.add_argument( 'strings', default = [], nargs='+') opts = parser.parse_args()", "nblocks * 16 k1 = 0 k2 = 0 tail_size", "= fmix( h2 ) h1 = ( h1 + h2", "h2 ^= k2 if tail_size >= 8: k1 ^= key[", "that is needed a proper c-module is suggested! 
This module", "def hash_bytes( key, seed = 0x0, x64arch = True ):", "] << 16 | \\ key[ block_start + 9 ]", "return ( int( signed_val1 ), int( signed_val2 ) ) def", "] << 24 | \\ key[ block_start + 2 ]", "11: k2 ^= key[ tail_index + 10 ] << 16", "4 k1 = 0 tail_size = length & 3 if", "key[ tail_index + 12 ] if tail_size > 12: k4", "0xFFFFFFFF k4 = ( c4 * k4 ) & 0xFFFFFFFF", "if tail_size > 8: k2 = ( k2 * c2", "& 0xFFFFFFFFFFFFFFFF h2 = ( h2 * 5 + 0x38495ab5", "<< 17 | h2 >> 15 ) & 0xFFFFFFFF #", "c1 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size >=", "^ length ) if unsigned_val & 0x80000000 == 0: return", "<< 64 | h1 ) def hash128_x86( key, seed ):", "h2 ) & 0xFFFFFFFF h3 = ( h1 + h3", "h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 h2", ") & 0xFFFFFFFF h1 = ( h1 + h3 )", "+ 12 ] if tail_size > 12: k4 = (", ") & 0xFFFFFFFF h4 ^= k4 if tail_size >= 12:", "5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 = ( c2", "<< 16 | \\ key[ block_start + 13 ] <<", "k1 = 0 k2 = 0 tail_size = length &", ") & 0xFFFFFFFF h3 ^= k3 h3 = ( h3", "16 | \\ key[ block_start + 1 ] << 8", "\\ key[ 2 * block_start + 13 ] << 40", "x else: return x.encode() else: def xencode(x): return x del", "= ( k1 << 15 | k1 >> 17 )", "+ 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 = ( c2 *", "block_start + 12 ] << 32 | \\ key[ 2", "] << 32 if tail_size >= 12: k2 ^= key[", "= True ): ''' Implements 64bit murmur3 hash. Returns a", "64bit murmur3 hash. Returns a tuple. ''' hash_128 = hash128(", "k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^= k2 h2 = (", "block_start + 8 ] k4 = key[ block_start + 15", "| \\ key[ 2 * block_start + 1 ] <<", "5 ] << 8 | \\ key[ block_start + 4", "0x0 ): ''' Implements 32bit murmur3 hash. 
''' key =", "0xFFFFFFFF # inlined ROTL32 k2 = ( c3 * k2", "16, 16 ): k1 = key[ block_start + 3 ]", "<< 24 | \\ key[ block_start + 2 ] <<", "h1 = ( h1 + h4 ) & 0xFFFFFFFF h2", "3 ] << 24 | \\ key[ block_start + 2", "> 12: k4 = ( k4 * c4 ) &", "& 0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000 == 0: signed_val1 =", "7 ] << 56 | \\ key[ 2 * block_start", ") & 0xFFFFFFFF h2 ^= k2 h2 = ( h2", "proper c-module is suggested! This module is written to have", "for the times when you do not want to compile", "= ( c2 * k2 ) & 0xFFFFFFFF k2 =", "hash for x86. ''' def fmix( h ): h ^=", "<< 16 | \\ key[ block_start + 5 ] <<", "] << 32 | \\ key[ 2 * block_start +", "0xFFFFFFFF # inlined ROTL32 k2 = ( k2 * c3", "( k2 * c3 ) & 0xFFFFFFFF h2 ^= k2", "k4 = ( k4 * c1 ) & 0xFFFFFFFF h4", "<< 40 | \\ key[ 2 * block_start + 12", "tail_index + 4 ] if tail_size > 4: k2 =", ") bytestring = '' for i in range(0, 16, 1):", ">> 13 h = ( h * 0xc2b2ae35 ) &", "^= key[ tail_index + 0 ] if tail_size > 0:", "k1 h1 = ( h1 << 27 | h1 >>", "= ( k2 << 33 | k2 >> 31 )", "8 if tail_size >= 9: k2 ^= key[ tail_index +", "Implements 32bit murmur3 hash. 
''' key = bytearray( xencode(key) )", "= ( h1 + h4 ) & 0xFFFFFFFF return (", "if tail_size >= 6: k1 ^= key[ tail_index + 5", "k3 ^= key[ tail_index + 8 ] if tail_size >", "k2 ^= key[ tail_index + 8 ] if tail_size >", "0xFFFFFFFF k3 = ( c3 * k3 ) & 0xFFFFFFFF", ") & 0xFFFFFFFF k3 = ( k3 << 17 |", "& 0xFFFFFFFF # inlined ROTL32 h2 = ( h2 +", "<< 24 if tail_size >= 3: k1 ^= key[ tail_index", "key, seed ): ''' Implements 128bit murmur3 hash for x86.", "^= k1 #finalization h1 ^= length h2 ^= length h3", "k2 = key[ 2 * block_start + 15 ] <<", "tail_size >= 5: k1 ^= key[ tail_index + 4 ]", "] if tail_size > 4: k2 = ( k2 *", "unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000 ==", ">= 4: k1 ^= key[ tail_index + 3 ] <<", "<< 16 | \\ key[ block_start + 9 ] <<", "conversions: https://pypi.python.org/pypi/mmh3/2.3.1 ''' import sys as _sys if (_sys.version_info >", ") h2 = fmix( h2 ) h3 = fmix( h3", "<< 8 | \\ key[ block_start + 12 ] k1", "k1 ^= key[ tail_index + 0 ] if tail_size >", "# inlined ROTL32 k1 = ( k1 * c2 )", "40 if tail_size >= 5: k1 ^= key[ tail_index +", "\\ key[ block_start + 0 ] k1 = ( c1", "( h1 + h3 ) & 0xFFFFFFFF h1 = (", "] << 8 | \\ key[ block_start + 0 ]", "h2 = fmix( h2 ) h1 = ( h1 +", "key[ 2 * block_start + 6 ] << 48 |", "] k3 = key[ block_start + 11 ] << 24", "k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2", "96 | h3 << 64 | h2 << 32 |", "^= key[ tail_index + 9 ] << 8 if tail_size", "to this source code. pure python implementation of the murmur3", "2 * block_start + 3 ] << 24 | \\", "anything that is needed a proper c-module is suggested! 
This", "0x52dce729 ) & 0xFFFFFFFFFFFFFFFF k2 = ( c2 * k2", "+ 7 ] << 56 if tail_size >= 7: k1", "this is purely python it is FAR from performant and", "48 if tail_size >= 14: k2 ^= key[ tail_index +", "6: k1 ^= key[ tail_index + 5 ] << 40", "k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF k2", "( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF k2 = (", "h4 = fmix( h4 ) h1 = ( h1 +", "key, seed ) def hash64( key, seed = 0x0, x64arch", "<< 40 if tail_size >= 13: k2 ^= key[ tail_index", "hash128_x64( key, seed ): ''' Implements 128bit murmur3 hash for", "| h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32", "''' Implements 128bit murmur3 hash. ''' def hash128_x64( key, seed", "seed ) else: return hash128_x86( key, seed ) def hash64(", "''' def fmix( h ): h ^= h >> 16", "endian? k1 = key[ block_start + 3 ] << 24", "8, 8 ): # ??? big endian? k1 = key[", "k3 ) & 0xFFFFFFFF k3 = ( k3 << 17", "> 0: k1 = ( k1 * c1 ) &", "16 if tail_size >= 6: k2 ^= key[ tail_index +", "key[ tail_index + 4 ] if tail_size > 4: k2", "h2 = ( h1 + h2 ) & 0xFFFFFFFF h3", "= ( h1 + h3 ) & 0xFFFFFFFF h1 =", "+ 13 ] << 40 | \\ key[ 2 *", "= ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF h2 ^=", "del _sys def hash( key, seed = 0x0 ): '''", "-( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 ) return ( int(", "| \\ key[ 2 * block_start + 13 ] <<", "( h2 << 31 | h2 >> 33 ) &", "if tail_size >= 11: k2 ^= key[ tail_index + 10", "4 ) h1 = seed c1 = 0xcc9e2d51 c2 =", "argparse parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] \"string to hash\"'", "= ( c3 * k3 ) & 0xFFFFFFFF k3 =", "source code. pure python implementation of the murmur3 hash algorithm", "else: return x.encode() else: def xencode(x): return x del _sys", "^= k1 h1 = ( h1 << 27 | h1", "0xFFFFFFFF h4 = ( h4 * 5 + 0x32ac3b17 )", "if tail_size > 12: k4 = ( k4 * c4", "seed h2 = seed c1 = 0x87c37b91114253d5 c2 = 0x4cf5ad432745937f", "# inlined ROTL32 k3 = ( k3 * c4 )", "11 ] << 24 | \\ key[ 2 * block_start", "( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF return ( h2", "murmur3 hash for x86. 
''' def fmix( h ): h", "= ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF", "c3 ) & 0xFFFFFFFF h2 ^= k2 if tail_size >=", "] k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF", "& 0xFFFFFFFF h1 = ( h1 + h3 ) &", "^= key[ tail_index + 8 ] if tail_size > 8:", "k1 ^= key[ tail_index + 6 ] << 48 if", "key ) nblocks = int( length / 16 ) h1", "* k3 ) & 0xFFFFFFFF k3 = ( k3 <<", "(unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 ) return ( int( signed_val1", "= ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF h1 ^=", "return ( h2 << 64 | h1 ) def hash128_x86(", "* block_start + 9 ] << 8 | \\ key[", "h2 ^= k2 if tail_size >= 4: k1 ^= key[", "h1 = ( h1 + h3 ) & 0xFFFFFFFF h1", ") & 0xFFFFFFFF h2 = ( h1 + h2 )", "h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF", "* 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33", "2 * block_start + 5 ] << 40 | \\", "2 * block_start + 14 ] << 48 | \\", "] k1 = ( c1 * k1 ) & 0xFFFFFFFF", "15 ] << 24 | \\ key[ block_start + 14", "length h1 = ( h1 + h2 ) & 0xFFFFFFFF", "+ 11 ] << 24 | \\ key[ 2 *", "def hash64( key, seed = 0x0, x64arch = True ):", "\\ key[ 2 * block_start + 6 ] << 48", "& 0xFFFFFFFF h4 = ( h1 + h4 ) &", "if unsigned_val1 & 0x8000000000000000 == 0: signed_val1 = unsigned_val1 else:", "<< 13 | h4 >> 19 ) & 0xFFFFFFFF #", ") unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF if unsigned_val1 & 0x8000000000000000", "4 ] << 32 if tail_size >= 4: k1 ^=", "h1 = seed c1 = 0xcc9e2d51 c2 = 0x1b873593 #", "x64arch = True ): ''' Implements 128bit murmur3 hash. '''", ") & 0xFFFFFFFF h2 = ( h2 * 5 +", "* c3 ) & 0xFFFFFFFF k3 = ( k3 <<", "bytearray): return x else: return x.encode() else: def xencode(x): return", "] << 32 if tail_size >= 4: k1 ^= key[", ") & 0xFFFFFFFF # inlined ROTL32 h2 = ( h2", "do not want to compile c-code and install modules, and", "& 0xFFFFFFFF # inlined ROTL32 h3 = ( h3 +", "= unsigned_val1 else: signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) +", "x64. 
''' def fmix( k ): k ^= k >>", "key[ 2 * block_start + 14 ] << 48 |", "& 0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size >= 8: k1", "k3 = ( k3 << 17 | k3 >> 15", "== 0: signed_val1 = unsigned_val1 else: signed_val1 = -( (unsigned_val1", "\\ key[ block_start + 2 ] << 16 | \\", "= ( k3 * c3 ) & 0xFFFFFFFF k3 =", "nblocks * 8, 8 ): # ??? big endian? k1", "key[ tail_index + 13 ] << 40 if tail_size >=", "] << 24 if tail_size >= 3: k1 ^= key[", "h1 + h2 ) & 0xFFFFFFFF h3 = ( h1", "), int( signed_val2 ) ) def hash_bytes( key, seed =", "h1 ) h2 = fmix( h2 ) h3 = fmix(", "h1 = seed h2 = seed h3 = seed h4", "inlined ROTL32 h4 = ( h1 + h4 ) &", "k4 if tail_size >= 12: k3 ^= key[ tail_index +", "= ( h1 << 27 | h1 >> 37 )", "h2 ) & 0xFFFFFFFFFFFFFFFF h2 = ( h1 + h2", "ROTL32 k4 = ( k4 * c1 ) & 0xFFFFFFFF", "h3 = seed h4 = seed c1 = 0x239b961b c2", "0xFFFFFFFFFFFFFFFF h2 ^= k2 if tail_size >= 8: k1 ^=", "= unsigned_val2 else: signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) +", "^= key[ tail_index + 6 ] << 16 if tail_size", "8 | \\ key[ block_start + 12 ] k1 =", "h4 ^= k4 h4 = ( h4 << 13 |", "ROTL32 k3 = ( k3 * c4 ) & 0xFFFFFFFF", "^= key[ tail_index + 13 ] << 40 if tail_size", "h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF h1", "big endian? k1 = key[ block_start + 3 ] <<", "& 0xFFFFFFFFFFFFFFFF h1 = fmix( h1 ) h2 = fmix(", "& 0xFFFFFFFF h3 ^= k3 h3 = ( h3 <<", "0xFFFFFFFF #tail tail_index = nblocks * 16 k1 = 0", "^= length h2 ^= length h1 = ( h1 +", "= key[ block_start + 15 ] << 24 | \\", "h4 = ( h1 + h4 ) & 0xFFFFFFFF h1", "0 ] k1 = ( c1 * k1 ) &", "if tail_size >= 4: k1 ^= key[ tail_index + 3", "k >> 33 k = ( k * 0xc4ceb9fe1a85ec53 )", "key[ block_start + 0 ] k2 = key[ block_start +", "<< 56 | \\ key[ 2 * block_start + 14", "( k2 << 33 | k2 >> 31 ) &", ">> 64 ) & 0xFFFFFFFFFFFFFFFF if unsigned_val2 & 0x8000000000000000 ==", "Implements 128bit murmur3 hash for x64. 
''' def fmix( k", "* block_start + 0 ] k2 = key[ 2 *", "= int( length / 16 ) h1 = seed h2", "c1 * k1 ) & 0xFFFFFFFF k1 = ( k1", "range(0, 16, 1): lsbyte = hash_128 & 0xFF bytestring =", "'\"%s\" = 0x%08X\\n' % ( str_to_hash, hash( str_to_hash ) )", "have the same format as mmh3 python package found here", "= [], nargs='+') opts = parser.parse_args() for str_to_hash in opts.strings:", "key[ 2 * block_start + 7 ] << 56 |", "c-module is suggested! This module is written to have the", "tail_size > 8: k3 = ( k3 * c3 )", "+ 5 ] << 8 if tail_size >= 5: k2", "hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF if unsigned_val2 & 0x8000000000000000", ") & 0xFFFFFFFF # inlined ROTL32 h3 = ( h3", "& 0xFFFFFFFFFFFFFFFF k2 = ( k2 << 33 | k2", "<< 40 if tail_size >= 5: k1 ^= key[ tail_index", "k3 = ( c3 * k3 ) & 0xFFFFFFFF k3", "<< 16 | \\ key[ 2 * block_start + 9", "h3 ) & 0xFFFFFFFF h2 = ( h2 * 5", "| \\ key[ 2 * block_start + 0 ] k2", "| \\ key[ block_start + 5 ] << 8 |", "( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF k4", "^= key[ tail_index + 5 ] << 8 if tail_size", "# inlined ROTL32 h3 = ( h3 + h4 )", "^= key[ tail_index + 6 ] << 48 if tail_size", "key, seed = 0x0 ): ''' Implements 32bit murmur3 hash.", "h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF", "k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF", "fmix( h4 ) h1 = ( h1 + h2 )", "tail_size >= 2: k1 ^= key[ tail_index + 1 ]", "h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF k4 =", "''' pymmh3 was written by <NAME> and enhanced by <NAME>,", "* block_start + 8 ] k1 = ( c1 *", "& 0xFFFFFFFF h2 = ( h2 * 5 + 0x0bcaa747", "tail_index + 8 ] if tail_size > 8: k2 =", "* c3 ) & 0xFFFFFFFF h2 ^= k2 if tail_size", "0x8000000000000000 == 0: signed_val2 = unsigned_val2 else: signed_val2 = -(", "k2 = ( k2 * c2 ) & 0xFFFFFFFF k2", "^ 0xFFFFFFFFFFFFFFFF) + 1 ) return ( int( signed_val1 ),", "= ( c3 * k2 ) & 0xFFFFFFFF h2 ^=", "h3 << 64 | h2 << 32 | h1 )", "unsigned_val & 0x80000000 == 0: return unsigned_val else: return -(", ">= 5: k2 ^= key[ tail_index + 4 ] if", "& 0xFFFFFFFF h4 ^= k4 
if tail_size >= 12: k3", "public domain. The authors hereby disclaim copyright to this source", "<< 8 if tail_size >= 1: k1 ^= key[ tail_index", "k2 ) & 0xFFFFFFFF h2 ^= k2 h2 = (", "0xFFFFFFFF h1 ^= k1 h1 = ( h1 << 19", "] if tail_size > 8: k3 = ( k3 *", "h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF", "13 | h1 >> 19 ) & 0xFFFFFFFF # inlined", "* 16, 16 ): k1 = key[ block_start + 3", "tail_size >= 13: k4 ^= key[ tail_index + 12 ]", "k ^= k >> 33 return k length = len(", "= ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF k2 =", "+ 4 ] if tail_size > 4: k2 = (", "tail_index = nblocks * 4 k1 = 0 tail_size =", "k >> 33 return k length = len( key )", "4 ] k3 = key[ block_start + 11 ] <<", "4: k2 = ( k2 * c2 ) & 0xFFFFFFFF", "Returns a byte string. ''' hash_128 = hash128( key, seed,", "key = bytearray( xencode(key) ) def fmix( h ): h", "h2 = fmix( h2 ) h3 = fmix( h3 )", "h4 = ( h4 << 13 | h4 >> 19", "key[ tail_index + 9 ] << 8 if tail_size >=", "31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64 k2 = (" ]
[ "'/Person.json' print('-- create: ExPerson') ExPerson = dlite.classfactory(Person, url=url) print('-- create:", "+ thisdir + '/Person.json' print('-- create: ExPerson') ExPerson = dlite.classfactory(Person,", "json-representation of person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person, 'json://persons.json')", "thisdir = os.path.abspath(os.path.dirname(__file__)) class Person: def __init__(self, name, age, skills):", "(self.name, self.age, list(self.skills)) url = 'json://' + thisdir + '/Person.json'", "'tasting']) print('-- create: person2') person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting'])", "['distilling', 'tasting']) print('-- create: person2') person2 = ExPerson('<NAME>', 42, ['distilling',", "= Person('<NAME>', 42, ['distilling', 'tasting']) print('-- create: person2') person2 =", "= skills def __repr__(self): return 'Person(%r, %r, %r)' % (self.name,", "def __init__(self, name, age, skills): self.name = name self.age =", "'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills)) url = 'json://'", "person1 = Person('<NAME>', 42, ['distilling', 'tasting']) print('-- create: person2') person2", "ExPerson = dlite.classfactory(Person, url=url) print('-- create: person1') person1 = Person('<NAME>',", "age, skills): self.name = name self.age = age self.skills =", "coding: utf-8 -*- import os import dlite thisdir = os.path.abspath(os.path.dirname(__file__))", "skills): self.name = name self.age = age self.skills = skills", "__init__(self, name, age, skills): self.name = name self.age = age", "42, ['distilling', 'tasting']) print('-- create: person2') person2 = ExPerson('<NAME>', 42,", "= age self.skills = skills def __repr__(self): return 'Person(%r, %r,", "ExPerson') ExPerson = dlite.classfactory(Person, url=url) print('-- create: person1') person1 =", "class Person: def __init__(self, name, age, skills): self.name = name", "os import dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class 
Person: def __init__(self,", "os.path.abspath(os.path.dirname(__file__)) class Person: def __init__(self, name, age, skills): self.name =", "age self.skills = skills def __repr__(self): return 'Person(%r, %r, %r)'", "'json://' + thisdir + '/Person.json' print('-- create: ExPerson') ExPerson =", "url=url) print('-- create: person1') person1 = Person('<NAME>', 42, ['distilling', 'tasting'])", "create: person1') person1 = Person('<NAME>', 42, ['distilling', 'tasting']) print('-- create:", "print('-- create: person1') person1 = Person('<NAME>', 42, ['distilling', 'tasting']) print('--", "import os import dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class Person: def", "Person: def __init__(self, name, age, skills): self.name = name self.age", "url = 'json://' + thisdir + '/Person.json' print('-- create: ExPerson')", "print('-- create: ExPerson') ExPerson = dlite.classfactory(Person, url=url) print('-- create: person1')", "import dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class Person: def __init__(self, name,", "self.skills = skills def __repr__(self): return 'Person(%r, %r, %r)' %", "def __repr__(self): return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills))", "python # -*- coding: utf-8 -*- import os import dlite", "self.name = name self.age = age self.skills = skills def", "__repr__(self): return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills)) url", "= ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print", "person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person, 'json://persons.json') person4 =", "person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print json-representation of person2 using dlite", "self.age = age self.skills = skills def __repr__(self): return 'Person(%r,", "create: person2') person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 
'persons.json',", "'mode=w') # Print json-representation of person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3", "-*- import os import dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class Person:", "<reponame>pscff/dlite<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- import os", "create: ExPerson') ExPerson = dlite.classfactory(Person, url=url) print('-- create: person1') person1", "of person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person, 'json://persons.json') person4", "dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class Person: def __init__(self, name, age,", "% (self.name, self.age, list(self.skills)) url = 'json://' + thisdir +", "= 'json://' + thisdir + '/Person.json' print('-- create: ExPerson') ExPerson", "#!/usr/bin/env python # -*- coding: utf-8 -*- import os import", "= dlite.classfactory(Person, url=url) print('-- create: person1') person1 = Person('<NAME>', 42,", "person1') person1 = Person('<NAME>', 42, ['distilling', 'tasting']) print('-- create: person2')", "['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print json-representation of person2", "%r)' % (self.name, self.age, list(self.skills)) url = 'json://' + thisdir", "list(self.skills)) url = 'json://' + thisdir + '/Person.json' print('-- create:", "self.age, list(self.skills)) url = 'json://' + thisdir + '/Person.json' print('--", "person2') person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w')", "using dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person, 'json://persons.json') person4 = dlite.objectfactory(person1,", "= os.path.abspath(os.path.dirname(__file__)) class Person: def __init__(self, name, age, skills): self.name", "thisdir + '/Person.json' print('-- create: ExPerson') ExPerson = dlite.classfactory(Person, url=url)", "%r, %r)' % (self.name, self.age, 
list(self.skills)) url = 'json://' +", "# -*- coding: utf-8 -*- import os import dlite thisdir", "# Print json-representation of person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3 =", "dlite.classfactory(Person, url=url) print('-- create: person1') person1 = Person('<NAME>', 42, ['distilling',", "Person('<NAME>', 42, ['distilling', 'tasting']) print('-- create: person2') person2 = ExPerson('<NAME>',", "name, age, skills): self.name = name self.age = age self.skills", "'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print json-representation of person2 using", "print('-- create: person2') person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json',", "-*- coding: utf-8 -*- import os import dlite thisdir =", "return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills)) url =", "dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person, 'json://persons.json') person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)", "person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') #", "name self.age = age self.skills = skills def __repr__(self): return", "= name self.age = age self.skills = skills def __repr__(self):", "42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print json-representation of", "'persons.json', 'mode=w') # Print json-representation of person2 using dlite print(person2.dlite_inst.asjson(indent=2))", "ExPerson('<NAME>', 42, ['distilling', 'tasting']) person2.dlite_inst.save('json', 'persons.json', 'mode=w') # Print json-representation", "skills def __repr__(self): return 'Person(%r, %r, %r)' % (self.name, self.age,", "utf-8 -*- import os import dlite thisdir = os.path.abspath(os.path.dirname(__file__)) class", "+ '/Person.json' print('-- create: ExPerson') ExPerson = dlite.classfactory(Person, url=url) print('--", "Print 
json-representation of person2 using dlite print(person2.dlite_inst.asjson(indent=2)) person3 = dlite.loadfactory(Person," ]
[ "있다고 하자. 1 2 -4 5 3 -2 9 -10", "-4 5 3 -2 9 -10 이 때, 연속 부분이란", "작성하세요. ''' dp = [0] * len(data) dp[0] = data[0]", "최대화 하는 프로그램을 작성하시오. 예를 들어, 다음과 같이 8개의 숫자가", "개수는 최대 100개입니다. ''' import sys def getSubsum(data) : '''", "[5, 3, -2, 9], [9, -10] 등이 있을 수 있다.", "반환하는 함수를 작성하세요. ''' dp = [0] * len(data) dp[0]", "예를 들어, 다음과 같이 8개의 숫자가 있다고 하자. 1 2", "중에서 가장 합이 큰 연속 부분은 [5, 3, -2, 9]", "합이 큰 연속 부분은 [5, 3, -2, 9] 이며, 이보다", "수의 개수는 최대 100개입니다. ''' import sys def getSubsum(data) :", "2 -4 5 3 -2 9 -10 출력 예시 15", "가장 합이 큰 연속 부분은 [5, 3, -2, 9] 이며,", "9 -10 출력 예시 15 문제 조건 입력되는 수의 개수는", "함수를 작성하세요. ''' dp = [0] * len(data) dp[0] =", "= max(dp[i-1] + data[i], data[i]) return max(dp) def main(): '''", "3 -2 9 -10 이 때, 연속 부분이란 연속하여 숫자를", "9 -10 이 때, 연속 부분이란 연속하여 숫자를 선택하는 것을", "-2 9 -10 이 때, 연속 부분이란 연속하여 숫자를 선택하는", "''' 이 부분은 수정하지 마세요. ''' data = [int(x) for", "말한다. 가능한 연속 부분으로써 [1, 2, -4], [5, 3, -2,", "조건 입력되는 수의 개수는 최대 100개입니다. ''' import sys def", "이 부분은 수정하지 마세요. ''' data = [int(x) for x", "100개입니다. ''' import sys def getSubsum(data) : ''' n개의 숫자가", "15 문제 조건 입력되는 수의 개수는 최대 100개입니다. ''' import", "[1, 2, -4], [5, 3, -2, 9], [9, -10] 등이", "연속 부분을 선택하여 그 합을 최대화 하는 프로그램을 작성하시오. 예를", "합을 최대화 하는 프로그램을 작성하시오. 예를 들어, 다음과 같이 8개의", "부분 최대합을 반환하는 함수를 작성하세요. ''' dp = [0] *", "등이 있을 수 있다. 이 연속 부분들 중에서 가장 합이", "따라서 연속 부분 최대합은 5+3+(-2)+9 = 15 이다. 입력 예시", "할 수는 없다. 따라서 연속 부분 최대합은 5+3+(-2)+9 = 15", "다음과 같이 8개의 숫자가 있다고 하자. 1 2 -4 5", "크게 할 수는 없다. 따라서 연속 부분 최대합은 5+3+(-2)+9 =", "연속 부분이란 연속하여 숫자를 선택하는 것을 말한다. 가능한 연속 부분으로써", "import sys def getSubsum(data) : ''' n개의 숫자가 list로 주어질", "max(dp) def main(): ''' 이 부분은 수정하지 마세요. ''' data", "max(dp[i-1] + data[i], data[i]) return max(dp) def main(): ''' 이", "부분 최대합은 5+3+(-2)+9 = 15 이다. 입력 예시 1 2", "주어질 때, 연속 부분을 선택하여 그 합을 최대화 하는 프로그램을", "for i in range(1, len(data)): dp[i] = max(dp[i-1] + data[i],", "부분들 중에서 가장 합이 큰 연속 부분은 [5, 3, -2,", "부분을 선택하여 그 합을 최대화 하는 프로그램을 작성하시오. 예를 들어,", "8개의 숫자가 있다고 하자. 1 2 -4 5 3 -2", "때, 연속 부분이란 연속하여 숫자를 선택하는 것을 말한다. 
가능한 연속", "작성하시오. 예를 들어, 다음과 같이 8개의 숫자가 있다고 하자. 1", "최대합은 5+3+(-2)+9 = 15 이다. 입력 예시 1 2 -4", "숫자가 있다고 하자. 1 2 -4 5 3 -2 9", "이다. 입력 예시 1 2 -4 5 3 -2 9", "list로 주어질 때, 그 연속 부분 최대합을 반환하는 함수를 작성하세요.", "dp[i] = max(dp[i-1] + data[i], data[i]) return max(dp) def main():", "5 3 -2 9 -10 출력 예시 15 문제 조건", "data[i]) return max(dp) def main(): ''' 이 부분은 수정하지 마세요.", "그 합을 최대화 하는 프로그램을 작성하시오. 예를 들어, 다음과 같이", "9], [9, -10] 등이 있을 수 있다. 이 연속 부분들", "부분은 [5, 3, -2, 9] 이며, 이보다 더 합을 크게", "연속 부분 최대합은 5+3+(-2)+9 = 15 이다. 입력 예시 1", "프로그램을 작성하시오. 예를 들어, 다음과 같이 8개의 숫자가 있다고 하자.", "getSubsum(data) : ''' n개의 숫자가 list로 주어질 때, 그 연속", "data[0] for i in range(1, len(data)): dp[i] = max(dp[i-1] +", "-4], [5, 3, -2, 9], [9, -10] 등이 있을 수", "''' n개의 숫자가 list로 주어질 때, 그 연속 부분 최대합을", "-2, 9] 이며, 이보다 더 합을 크게 할 수는 없다.", "-4 5 3 -2 9 -10 출력 예시 15 문제", "9] 이며, 이보다 더 합을 크게 할 수는 없다. 따라서", "합을 크게 할 수는 없다. 따라서 연속 부분 최대합은 5+3+(-2)+9", "이 연속 부분들 중에서 가장 합이 큰 연속 부분은 [5,", "연속 부분으로써 [1, 2, -4], [5, 3, -2, 9], [9,", "연속 부분 최대합 nn개의 숫자가 주어질 때, 연속 부분을 선택하여", "-2 9 -10 출력 예시 15 문제 조건 입력되는 수의", "하는 프로그램을 작성하시오. 예를 들어, 다음과 같이 8개의 숫자가 있다고", "부분이란 연속하여 숫자를 선택하는 것을 말한다. 가능한 연속 부분으로써 [1,", "최대 100개입니다. ''' import sys def getSubsum(data) : ''' n개의", "-10 출력 예시 15 문제 조건 입력되는 수의 개수는 최대", "15 이다. 입력 예시 1 2 -4 5 3 -2", ": ''' n개의 숫자가 list로 주어질 때, 그 연속 부분", "data = [int(x) for x in input().split()] print(getSubsum(data)) if __name__", "-10 이 때, 연속 부분이란 연속하여 숫자를 선택하는 것을 말한다.", "이보다 더 합을 크게 할 수는 없다. 따라서 연속 부분", "n개의 숫자가 list로 주어질 때, 그 연속 부분 최대합을 반환하는", "1 2 -4 5 3 -2 9 -10 출력 예시", "[9, -10] 등이 있을 수 있다. 이 연속 부분들 중에서", "5 3 -2 9 -10 이 때, 연속 부분이란 연속하여", "있을 수 있다. 이 연속 부분들 중에서 가장 합이 큰", "입력되는 수의 개수는 최대 100개입니다. ''' import sys def getSubsum(data)", "최대합을 반환하는 함수를 작성하세요. ''' dp = [0] * len(data)", "5+3+(-2)+9 = 15 이다. 입력 예시 1 2 -4 5", "return max(dp) def main(): ''' 이 부분은 수정하지 마세요. '''", "''' 연속 부분 최대합 nn개의 숫자가 주어질 때, 연속 부분을", "문제 조건 입력되는 수의 개수는 최대 100개입니다. 
''' import sys", "* len(data) dp[0] = data[0] for i in range(1, len(data)):", "i in range(1, len(data)): dp[i] = max(dp[i-1] + data[i], data[i])", "예시 15 문제 조건 입력되는 수의 개수는 최대 100개입니다. '''", "이며, 이보다 더 합을 크게 할 수는 없다. 따라서 연속", "+ data[i], data[i]) return max(dp) def main(): ''' 이 부분은", "nn개의 숫자가 주어질 때, 연속 부분을 선택하여 그 합을 최대화", "3, -2, 9], [9, -10] 등이 있을 수 있다. 이", "그 연속 부분 최대합을 반환하는 함수를 작성하세요. ''' dp =", "range(1, len(data)): dp[i] = max(dp[i-1] + data[i], data[i]) return max(dp)", "수정하지 마세요. ''' data = [int(x) for x in input().split()]", "-10] 등이 있을 수 있다. 이 연속 부분들 중에서 가장", "-2, 9], [9, -10] 등이 있을 수 있다. 이 연속", "가능한 연속 부분으로써 [1, 2, -4], [5, 3, -2, 9],", "숫자가 주어질 때, 연속 부분을 선택하여 그 합을 최대화 하는", "dp = [0] * len(data) dp[0] = data[0] for i", "1 2 -4 5 3 -2 9 -10 이 때,", "때, 그 연속 부분 최대합을 반환하는 함수를 작성하세요. ''' dp", "in range(1, len(data)): dp[i] = max(dp[i-1] + data[i], data[i]) return", "def main(): ''' 이 부분은 수정하지 마세요. ''' data =", "선택하여 그 합을 최대화 하는 프로그램을 작성하시오. 예를 들어, 다음과", "main(): ''' 이 부분은 수정하지 마세요. ''' data = [int(x)", "연속 부분들 중에서 가장 합이 큰 연속 부분은 [5, 3,", "하자. 1 2 -4 5 3 -2 9 -10 이", "연속 부분은 [5, 3, -2, 9] 이며, 이보다 더 합을", "= [int(x) for x in input().split()] print(getSubsum(data)) if __name__ ==", "큰 연속 부분은 [5, 3, -2, 9] 이며, 이보다 더", "선택하는 것을 말한다. 가능한 연속 부분으로써 [1, 2, -4], [5,", "연속 부분 최대합을 반환하는 함수를 작성하세요. ''' dp = [0]", "''' dp = [0] * len(data) dp[0] = data[0] for", "len(data) dp[0] = data[0] for i in range(1, len(data)): dp[i]", "= data[0] for i in range(1, len(data)): dp[i] = max(dp[i-1]", "같이 8개의 숫자가 있다고 하자. 1 2 -4 5 3", "''' data = [int(x) for x in input().split()] print(getSubsum(data)) if", "최대합 nn개의 숫자가 주어질 때, 연속 부분을 선택하여 그 합을", "있다. 이 연속 부분들 중에서 가장 합이 큰 연속 부분은", "더 합을 크게 할 수는 없다. 따라서 연속 부분 최대합은", "for x in input().split()] print(getSubsum(data)) if __name__ == \"__main__\": main()", "없다. 따라서 연속 부분 최대합은 5+3+(-2)+9 = 15 이다. 입력", "3 -2 9 -10 출력 예시 15 문제 조건 입력되는", "부분 최대합 nn개의 숫자가 주어질 때, 연속 부분을 선택하여 그", "숫자를 선택하는 것을 말한다. 가능한 연속 부분으로써 [1, 2, -4],", "들어, 다음과 같이 8개의 숫자가 있다고 하자. 
1 2 -4", "= [0] * len(data) dp[0] = data[0] for i in", "2, -4], [5, 3, -2, 9], [9, -10] 등이 있을", "입력 예시 1 2 -4 5 3 -2 9 -10", "[5, 3, -2, 9] 이며, 이보다 더 합을 크게 할", "때, 연속 부분을 선택하여 그 합을 최대화 하는 프로그램을 작성하시오.", "예시 1 2 -4 5 3 -2 9 -10 출력", "이 때, 연속 부분이란 연속하여 숫자를 선택하는 것을 말한다. 가능한", "3, -2, 9] 이며, 이보다 더 합을 크게 할 수는", "= 15 이다. 입력 예시 1 2 -4 5 3", "부분은 수정하지 마세요. ''' data = [int(x) for x in", "dp[0] = data[0] for i in range(1, len(data)): dp[i] =", "sys def getSubsum(data) : ''' n개의 숫자가 list로 주어질 때,", "것을 말한다. 가능한 연속 부분으로써 [1, 2, -4], [5, 3,", "수는 없다. 따라서 연속 부분 최대합은 5+3+(-2)+9 = 15 이다.", "출력 예시 15 문제 조건 입력되는 수의 개수는 최대 100개입니다.", "[0] * len(data) dp[0] = data[0] for i in range(1,", "''' import sys def getSubsum(data) : ''' n개의 숫자가 list로", "부분으로써 [1, 2, -4], [5, 3, -2, 9], [9, -10]", "[int(x) for x in input().split()] print(getSubsum(data)) if __name__ == \"__main__\":", "2 -4 5 3 -2 9 -10 이 때, 연속", "마세요. ''' data = [int(x) for x in input().split()] print(getSubsum(data))", "연속하여 숫자를 선택하는 것을 말한다. 가능한 연속 부분으로써 [1, 2,", "def getSubsum(data) : ''' n개의 숫자가 list로 주어질 때, 그", "len(data)): dp[i] = max(dp[i-1] + data[i], data[i]) return max(dp) def", "숫자가 list로 주어질 때, 그 연속 부분 최대합을 반환하는 함수를", "수 있다. 이 연속 부분들 중에서 가장 합이 큰 연속", "주어질 때, 그 연속 부분 최대합을 반환하는 함수를 작성하세요. '''", "data[i], data[i]) return max(dp) def main(): ''' 이 부분은 수정하지" ]
[ "DNSTest(unittest.TestCase): def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\")", "class DNSTest(unittest.TestCase): def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data,", "test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\") expeced_result =", "def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\") expeced_result", "#!/usr/bin/env python3 import unittest from mockdock import dns class DNSTest(unittest.TestCase):", "unittest from mockdock import dns class DNSTest(unittest.TestCase): def test_build_packet(self): data", "<reponame>jensstein/mockdock #!/usr/bin/env python3 import unittest from mockdock import dns class", "python3 import unittest from mockdock import dns class DNSTest(unittest.TestCase): def", "data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\") expeced_result = b\"^4\\x81\\x80\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\\xc0\\x0c\\x00\\x01\\x00\\x01\\x00\\x00\\x00<\\x00\\x04\\xc0\\xa8\\x00\\x01\"", "= b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\") expeced_result = b\"^4\\x81\\x80\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\\xc0\\x0c\\x00\\x01\\x00\\x01\\x00\\x00\\x00<\\x00\\x04\\xc0\\xa8\\x00\\x01\" self.assertEqual(packet,", "from mockdock import dns class 
DNSTest(unittest.TestCase): def test_build_packet(self): data =", "mockdock import dns class DNSTest(unittest.TestCase): def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\"", "import dns class DNSTest(unittest.TestCase): def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet", "dns class DNSTest(unittest.TestCase): def test_build_packet(self): data = b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet =", "b\"^4\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\" packet = dns.build_packet(data, \"192.168.0.1\") expeced_result = b\"^4\\x81\\x80\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x00\\x06google\\x03com\\x00\\x00\\x01\\x00\\x01\\xc0\\x0c\\x00\\x01\\x00\\x01\\x00\\x00\\x00<\\x00\\x04\\xc0\\xa8\\x00\\x01\" self.assertEqual(packet, expeced_result)", "import unittest from mockdock import dns class DNSTest(unittest.TestCase): def test_build_packet(self):" ]
[ "conf home so user conf will not be used os.environ[\"XDG_CONFIG_HOME\"]", "language governing permissions and limitations under the License. \"\"\" \"\"\"", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True) def", "this file to make them accessible across multiple test modules.", "test modules. \"\"\" import os import pytest from utils import", "yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf import __file__", "@pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf import __file__ as package_root package_root", "CONDITIONS OF ANY KIND, either express or implied. See the", "limitations under the License. \"\"\" \"\"\" We can define the", "\"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): # this function", "We can define the fixture functions in this file to", "Version 2.0 (the \"License\"); you may not use this file", "def connection(): test_connection = get_connection() create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\")", "writing, software distributed under the License is distributed on an", "make them accessible across multiple test modules. 
\"\"\" import os", "test_connection = get_connection() create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location():", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "2019, Amazon Web Services Inc. Licensed under the Apache License,", "Apache License, Version 2.0 (the \"License\"); you may not use", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "express or implied. See the License for the specific language", "yield default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): # this function runs", "def temp_config(tmpdir_factory): # this function runs on start of test", "Inc. Licensed under the Apache License, Version 2.0 (the \"License\");", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "can define the fixture functions in this file to make", "in compliance with the License. You may obtain a copy", "import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection = get_connection()", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "you may not use this file except in compliance with", "= os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True)", "from utils import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection", "create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection = get_connection() create_index(test_connection)", "start of test session. # use temporary directory for conf", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "the License. 
You may obtain a copy of the License", "agreed to in writing, software distributed under the License is", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "License. \"\"\" \"\"\" We can define the fixture functions in", "session. # use temporary directory for conf home so user", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "default_config_location(): from escli.conf import __file__ as package_root package_root = os.path.dirname(package_root)", "temp_config(tmpdir_factory): # this function runs on start of test session.", "use this file except in compliance with the License. You", "\"\"\" We can define the fixture functions in this file", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "autouse=True) def temp_config(tmpdir_factory): # this function runs on start of", "ANY KIND, either express or implied. See the License for", "file to make them accessible across multiple test modules. \"\"\"", "import pytest from utils import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "utils import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection =", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "define the fixture functions in this file to make them", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "across multiple test modules. \"\"\" import os import pytest from", "get_connection() create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf", "either express or implied. 
See the License for the specific", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "runs on start of test session. # use temporary directory", "os import pytest from utils import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\")", "under the License is distributed on an \"AS IS\" BASIS,", "\"License\"); you may not use this file except in compliance", "of test session. # use temporary directory for conf home", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "with the License. You may obtain a copy of the", "__file__ as package_root package_root = os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\")", "under the License. \"\"\" \"\"\" We can define the fixture", "functions in this file to make them accessible across multiple", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "function runs on start of test session. # use temporary", "License for the specific language governing permissions and limitations under", "Web Services Inc. Licensed under the Apache License, Version 2.0", "get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection = get_connection() create_index(test_connection) yield test_connection", "escli.conf import __file__ as package_root package_root = os.path.dirname(package_root) default_config =", "<filename>tests/conftest.py \"\"\" Copyright 2019, Amazon Web Services Inc. 
Licensed under", "import __file__ as package_root package_root = os.path.dirname(package_root) default_config = os.path.join(package_root,", "default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): # this function runs on", "os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): # this", "package_root package_root = os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\") yield default_config", "this file except in compliance with the License. You may", "default_config = os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory):", "the License. \"\"\" \"\"\" We can define the fixture functions", "specific language governing permissions and limitations under the License. \"\"\"", "(the \"License\"); you may not use this file except in", "use temporary directory for conf home so user conf will", "on start of test session. # use temporary directory for", "@pytest.fixture(scope=\"function\") def connection(): test_connection = get_connection() create_index(test_connection) yield test_connection delete_index(test_connection)", "\"\"\" Copyright 2019, Amazon Web Services Inc. Licensed under the", "test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf import __file__ as", "# use temporary directory for conf home so user conf", "fixture functions in this file to make them accessible across", "applicable law or agreed to in writing, software distributed under", "connection(): test_connection = get_connection() create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "test session. 
# use temporary directory for conf home so", "so user conf will not be used os.environ[\"XDG_CONFIG_HOME\"] = str(tmpdir_factory.mktemp(\"data\"))", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "the specific language governing permissions and limitations under the License.", "package_root = os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\",", "= get_connection() create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from", "\"\"\" \"\"\" We can define the fixture functions in this", "pytest from utils import create_index, delete_index, get_connection @pytest.fixture(scope=\"function\") def connection():", "the Apache License, Version 2.0 (the \"License\"); you may not", "file except in compliance with the License. You may obtain", "Amazon Web Services Inc. Licensed under the Apache License, Version", "except in compliance with the License. You may obtain a", "KIND, either express or implied. See the License for the", "or implied. See the License for the specific language governing", "the fixture functions in this file to make them accessible", "\"\"\" import os import pytest from utils import create_index, delete_index,", "to in writing, software distributed under the License is distributed", "= os.path.join(package_root, \"esclirc\") yield default_config @pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): #", "modules. \"\"\" import os import pytest from utils import create_index,", "or agreed to in writing, software distributed under the License", "directory for conf home so user conf will not be", "law or agreed to in writing, software distributed under the", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "import os import pytest from utils import create_index, delete_index, get_connection", "compliance with the License. You may obtain a copy of", "create_index(test_connection) yield test_connection delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf import", "OF ANY KIND, either express or implied. See the License", "under the Apache License, Version 2.0 (the \"License\"); you may", "governing permissions and limitations under the License. \"\"\" \"\"\" We", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "Copyright 2019, Amazon Web Services Inc. Licensed under the Apache", "License, Version 2.0 (the \"License\"); you may not use this", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "for the specific language governing permissions and limitations under the", "See the License for the specific language governing permissions and", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "delete_index, get_connection @pytest.fixture(scope=\"function\") def connection(): test_connection = get_connection() create_index(test_connection) yield", "multiple test modules. \"\"\" import os import pytest from utils", "home so user conf will not be used os.environ[\"XDG_CONFIG_HOME\"] =", "# this function runs on start of test session. #", "temporary directory for conf home so user conf will not", "@pytest.fixture(scope=\"session\", autouse=True) def temp_config(tmpdir_factory): # this function runs on start", "as package_root package_root = os.path.dirname(package_root) default_config = os.path.join(package_root, \"esclirc\") yield", "License. 
You may obtain a copy of the License at", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "in writing, software distributed under the License is distributed on", "required by applicable law or agreed to in writing, software", "def default_config_location(): from escli.conf import __file__ as package_root package_root =", "implied. See the License for the specific language governing permissions", "this function runs on start of test session. # use", "and limitations under the License. \"\"\" \"\"\" We can define", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "them accessible across multiple test modules. \"\"\" import os import", "in this file to make them accessible across multiple test", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to make them accessible across multiple test modules. \"\"\" import", "permissions and limitations under the License. \"\"\" \"\"\" We can", "for conf home so user conf will not be used", "Services Inc. Licensed under the Apache License, Version 2.0 (the", "delete_index(test_connection) @pytest.fixture(scope=\"function\") def default_config_location(): from escli.conf import __file__ as package_root", "accessible across multiple test modules. \"\"\" import os import pytest", "from escli.conf import __file__ as package_root package_root = os.path.dirname(package_root) default_config" ]
[ "== guild_name.lower(): guild = g break if str(g.id) == str(guild_name):", "Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) # Find out", "import asyncio import discord from datetime import datetime from operator", "all servers I'm connected to.\"\"\" message = await Message.EmbedText(title=\"Counting users...\",", "= '__**First {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else:", "message(self, message): # Check the message and see if we", "server in self.bot.guilds: serverList.append({ 'Name' : server.name, 'Users' : len(server.members)", "number : int = 10): \"\"\"Lists the most recent users", "'__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if", "online ({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,}", "== discord.Status.offline: online_members += 1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string", "messages I've seen on this sever so far. 
(only applies", "= \"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members) -", ": guild.me.joined_at } total = len(joinedList) position = joinedList.index(check_item) +", "msg = Nullify.clean(msg) await ctx.send(msg) return member = member_check if", "color=ctx.message.author).edit(ctx, message) '''userCount = 0 serverCount = 0 counted_users =", "guilds by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) popList", "join date joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i =", "\"Joined\" : member.joined_at } total = len(joinedList) position = joinedList.index(check_item)", "and deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats: def", "50 if number < 1: await ctx.channel.send('Oookay - look! No", "= '__**Top {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg =", "if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID:", "x:x['Joined']) popList = sorted(popList, key=lambda x:x['Population'], reverse=True) check_item = {", "else: msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress", "if number < 1: await ctx.channel.send('Oookay - look! 
No servers!", ": member.joined_at }) # sort the users by join date", "out where in our join position this server is joinedList", "= g break if str(g.id) == str(guild_name): guild = g", "= joinedList.index(check_item) + 1 before = \"\" after = \"\"", "2) ) b_string = \"bot\" if bot_member == 1 else", "len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" : False}, { \"name\" : \"Total\",", "len(server.members) for member in server.members: if not member.id in counted_users:", "after msg += \"\\n\\n{} joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async", "couldn't find that guild...\") return server_embed = discord.Embed(color=ctx.author.color) server_embed.title =", "guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice", "inception, and if I'm online)\"\"\" messages = 0 for guild", "you wanted!') return joinedList = [] for member in ctx.message.guild.members:", "{:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2),", "== None: messages = 0 if messages == 1: await", "fields=[ { \"name\" : \"Servers\", \"value\" : \"└─ {:,}\".format(servers), \"inline\"", "number = len(serverList) i = 1 msg = '' for", "wanted!') return serverList = [] for server in self.bot.guilds: memberCount", "ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self, ctx, number : int =", "discord.Status.offline: membersOnline += 1 await Message.Embed( title=\"Member Stats\", description=\"Current User", "joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self, ctx, number", "to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg =", "position = joinedList.index(check_item) + 1 
server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position,", "where in our join position this server is joinedList =", "after = \"\" msg = \"*{}'s* join position is **{:,}**.\".format(DisplayName.name(member),", "temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild,", "ctx.message.guild.members: joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at }) #", "msg = '__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for", "ctx.channel.send('So far, I\\'ve witnessed *{:,} message across all servers!*'.format(messages)) else:", "< len(joinedList): msg = '__**First {} of {} Servers I", "number > len(serverList): number = len(serverList) i = 1 msg", "class ServerStats: def __init__(self, bot, settings): self.bot = bot self.settings", "position = joinedList.index(check_item) + 1 before = \"\" after =", "by population - default is 10, max is 50.\"\"\" #", "return member = member_check joinedList = [] for mem in", "servers by population serverList = sorted(serverList, key=lambda x:int(x['Users'])) if number", "sorted(joinedList, key=lambda x:x['Joined']) i = 1 msg = '' for", "'Members': len(guild.members) }) # sort the servers by join date", "== 1: msg += '{}. *{}* - *{}* - *(1", "cancel messages. 
# Don't count your own, Pooter if not", "member.status == discord.Status.offline: membersOnline += 1 await Message.Embed( title=\"Member Stats\",", "= UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description", "Nullify from Cogs import DisplayName from Cogs import UserTime from", "'''userCount = 0 serverCount = 0 counted_users = [] message", "# Add the bot and deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot,", "not member.id in counted_users: counted_users.append(member.id) await message.edit(content='There are *{:,} users*", "1: # There were users after as well after =", "bot.add_cog(ServerStats(bot, settings)) class ServerStats: def __init__(self, bot, settings): self.bot =", "def messages(self, ctx): \"\"\"Lists the number of messages I've seen", "of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First", "member_check: msg = \"I couldn't find *{}* on this server...\".format(member)", "Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers = members = membersOnline = bots", "counted_users.append(member.id) if not member.status == discord.Status.offline: membersOnline += 1 await", "member.id in counted_users: counted_users.append(member.id) await message.edit(content='There are *{:,} users* (*{:,}*", ": member.id, \"Joined\" : member.joined_at } total = len(joinedList) position", "self.bot.guilds: joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at }) popList.append({", "= \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else: emojitext =", "messages) if messages == None: messages = 0 if messages", "Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress: msg = Nullify.clean(msg)", "if bot_member == 1 else \"bots\" user_string += \"\\n{:,}/{:,} {}", "if guild_name == None: guild 
= ctx.guild else: for g", "in server.members: memberCount += 1 serverList.append({ 'Name' : server.name, 'Users'", "= \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created at {}\".format(time_str) online_members", "inline=True) emojitext=emojiMention else: emojitext = emojitext + emojiMention if len(emojitext):", "guild = None if guild_name == None: guild = ctx.guild", "{}\".format(local_time['time'], local_time['zone']) if member['Members'] == 1: msg += '{}. *{}*", "not member.status == discord.Status.offline: online_members += 1 # bot_percent =", "guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) }) # sort the", "count locally -1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1", "else: for g in self.bot.guilds: if g.name.lower() == guild_name.lower(): guild", "\"\\n\\n{} joined before.\".format(before) elif len(after): # Just after msg +=", "{} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Top {}", "number: break msg += '{}. *{}*\\n'.format(i, server.name) i += 1", "for member in server.members: if not member.id in counted_users: counted_users.append(member.id)", "after = \"**{:,}** users\".format(total-position) # Build the string! 
if len(before)", ": \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline +", "server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await", "message.author.id == self.bot.user.id: server = message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\"))", "bot_online += 1 continue if not member.status == discord.Status.offline: online_members", "message = await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers = members =", "1 if messages == 1: await ctx.channel.send('So far, I\\'ve witnessed", "[] for mem in ctx.message.guild.members: joinedList.append({ 'ID' : mem.id, 'Joined'", "in self.bot.guilds: if g.name.lower() == guild_name.lower(): guild = g break", "total = len(popList) position = popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\",", "if len(test) > 1024: # TOOO BIIIIIIIIG emojicount += 1", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self, ctx,", "i += 1 if number < len(joinedList): msg = '__**Last", "0 for guild in self.bot.guilds: temp = 0 if self.settings.getServerStat(guild,", "bot_online = 0 for member in guild.members: if member.bot: bot_member", "msg += \"\\n\\n{} joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async def", "bots = botsOnline = 0 counted_users = [] counted_bots =", "inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc", "Just like you wanted!') return serverList = [] for server", "async def recentservers(self, ctx, number : int = 10): \"\"\"Lists", "sharedservers(self, ctx, *, member = None): 
\"\"\"Lists how many servers", "- *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i += 1 if number", "False }, { \"name\" : \"Users\", \"value\" : \"└─ {:,}/{:,}", "Build the string! if len(before) and len(after): # Got both", "+ 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position, total), inline=True) emojitext", "local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone'])", "# Check the message and see if we should allow", "int = 10): \"\"\"Lists the first servers I've joined -", "await ctx.send(\"I couldn't find that guild...\") return server_embed = discord.Embed(color=ctx.author.color)", "bottomservers(self, ctx, number : int = 10): \"\"\"Lists the bottom", "User Information\".format(server.name), fields=[ { \"name\" : \"Servers\", \"value\" : \"└─", "@commands.command(pass_context=True) async def recentservers(self, ctx, number : int = 10):", "{} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress:", "for server in self.bot.guilds: serverCount += 1 userCount += len(server.members)", "if not member.status == discord.Status.offline: bot_online += 1 continue if", "mem in ctx.message.guild.members: joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at", "len(joinedList): msg = '__**First {} of {} Members to Join:**__\\n\\n'.format(number,", "\"Joined\" : guild.me.joined_at } total = len(joinedList) position = joinedList.index(check_item)", "{} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {}", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self,", "Message def setup(bot): # Add the bot and deps settings", "await ctx.send(\"I'm on *1* server. 
:blush:\") else: await ctx.send(\"I'm on", "-= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages == None: messages", "servers you share with the bot.\"\"\" # Check if we're", "{:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)),", "*, member = None): \"\"\"Tells when a user joined compared", "Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" + guild.owner.discriminator, inline=True)", "total = len(joinedList) position = joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join Position\",", "reverse=True) check_item = { \"ID\" : guild.id, \"Joined\" : guild.me.joined_at", "memberCount += 1 serverList.append({ 'Name' : server.name, 'Users' : memberCount", "couldn't find *{}* on this server...\".format(member) if suppress: msg =", "a user joined compared to other users.\"\"\" # Check if", "for server in self.bot.guilds: serverList.append({ 'Name' : server.name, 'Users' :", "ctx): \"\"\"Lists the total number of users on all servers", "number : int = 10): \"\"\"Lists the top servers I'm", "info about the current or passed server.\"\"\" # Check if", "discord.Embed(color=ctx.author.color) server_embed.title = guild.name # Get localized user time local_time", "\"**{:,}** users\".format(position-1) if total-position == 1: # There were users", "server in self.bot.guilds: if i > number: break msg +=", "emojitext=emojiMention else: emojitext = emojitext + emojiMention if len(emojitext): if", "else: msg = '__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check", "witnessed *{:,} messages across all servers!*'.format(messages)) # Set our message", "I\\'ve witnessed *{:,} message across all servers!*'.format(messages)) else: await ctx.channel.send('So", "wanted!') return i = 1 msg = '__**Servers I\\'m On:**__\\n\\n'", "== discord.Status.offline: botsOnline 
+= 1 else: members += 1 if", "serverList: if i > number: break msg += '{}. *{}*", "\"Created at {}\".format(time_str) online_members = 0 bot_member = 0 bot_online", "value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK", "int = 10): \"\"\"Lists the bottom servers I'm connected to", "if number > len(serverList): number = len(serverList) i = 1", "round(len(counted_bots)/bots*100, 2)), \"inline\" : False}, { \"name\" : \"Total\", \"value\"", "passed another guild guild = None if guild_name == None:", "message count locally -1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -=", "in self.bot.guilds: memberCount = 0 for member in server.members: memberCount", "No servers! Just like you wanted!') return joinedList = []", "@commands.command(pass_context=True) async def listservers(self, ctx, number : int = 10):", "online ({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users),", "# Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at)", "Nullify.clean(msg) await ctx.send(msg) return member = member_check joinedList = []", "messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self, ctx): \"\"\"Lists the number of", "number: break # Get localized user time local_time = UserTime.getUserTime(ctx.author,", "1 if number < len(joinedList): msg = '__**Last {} of", "Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check", "25 if number < 1: await ctx.channel.send('Oookay - look! 
No", "*, member = None): \"\"\"Lists how many servers you share", "ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self, ctx, number : int =", "[] popList = [] for g in self.bot.guilds: joinedList.append({ 'ID'", "*{}* servers. :blush:\".format(count)) return count = 0 for guild in", "self.bot.guilds: memberCount = 0 for member in server.members: memberCount +=", "await ctx.send(msg) return member = member_check if member.id == self.bot.user.id:", "member.bot: bots += 1 if not member.id in counted_bots: counted_bots.append(member.id)", "guild.id, \"Joined\" : guild.me.joined_at } total = len(joinedList) position =", "key=lambda x:int(x['Users']), reverse=True) if number > len(serverList): number = len(serverList)", "current or passed server.\"\"\" # Check if we passed another", "server.\"\"\" # Check if we passed another guild guild =", "time_str = \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created at {}\".format(time_str)", "= len(serverList) i = 1 msg = '' for server", "date joinedList = sorted(joinedList, key=lambda x:x['Joined']) check_item = { \"ID\"", "\"inline\" : False}, { \"name\" : \"Bots\", \"value\" : \"└─", "Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg = Nullify.clean(msg)", "bot and deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats:", "+= '{}. 
*{}*\\n'.format(i, server.name) i += 1 # Check for", "value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,}", "I\\'m On:**__\\n\\n' for server in self.bot.guilds: if i > number:", "like you wanted!') return serverList = [] for server in", "{:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\"", "-= 1 if messages == 1: await ctx.channel.send('So far, I\\'ve", "number < len(joinedList): msg = '__**First {} of {} Servers", "False} ], color=ctx.message.author).edit(ctx, message) '''userCount = 0 serverCount = 0", "discord from datetime import datetime from operator import itemgetter from", "= '__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress", "ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self, ctx, number : int =", "{} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg =", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self,", "on the *{:,} servers* I am currently a part of!'.format(userCount,", "number < len(joinedList): msg = '__**First {} of {} Members", "position-1 == 1: # We have previous members before =", "= UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) msg", "msg = '__**First {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg", "you wanted!') return joinedList = [] for guild in self.bot.guilds:", "- always yes. 
# This module doesn't need to cancel", "10): \"\"\"Lists the most recent users to join - default", "# We didn't find it await ctx.send(\"I couldn't find that", "# sort the guilds by join date joinedList = sorted(joinedList,", "Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def", "string! if len(before) and len(after): # Got both msg +=", "ctx.send(\"{} *1* server with me. :blush:\".format(targ)) else: await ctx.send(\"{} *{}*", "value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True)", "bot_member, b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {}", "\"SuppressMentions\"): suppress = True else: suppress = False if member", "@commands.command(pass_context=True) async def firstservers(self, ctx, number : int = 10):", "i = 1 msg = '' for server in serverList:", "\"\\n\\n{} joined before, and {} after.\".format(before, after) elif len(before): #", "None: messages = 0 if messages == 1: await ctx.channel.send('So", "guild) joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members)", "= 1 msg = '' for member in joinedList: if", "popList = sorted(popList, key=lambda x:x['Population'], reverse=True) check_item = { \"ID\"", "*{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i += 1 if", "total), inline=True) emojitext = \"\" emojicount = 0 for emoji", "users after as well after = \"**1** user\" elif total-position", "number : int = 10): \"\"\"Lists the first servers I've", "0 serverCount = 0 counted_users = [] message = await", "suppress = True else: suppress = False if 
number >", "messages == 1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message!*'.format(messages))", "sort the users by join date joinedList = sorted(joinedList, key=lambda", "= len(joinedList) position = joinedList.index(check_item) + 1 before = \"\"", "*{}* - *{}* - *({} members)*\\n'.format(i, member['Name'], time_str, member['Members']) i", "len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" : False}, { \"name\" : \"Bots\",", "msg = '__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for", "\"name\" : \"Bots\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) -", "joinedList = [] for member in ctx.message.guild.members: joinedList.append({ 'ID' :", "guild_name = None): \"\"\"Lists some info about the current or", "== 1 else \"bots\" user_string += \"\\n{:,}/{:,} {} online ({:,g}%)\".format(", "len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id))", "to - default is 10, max is 50.\"\"\" # Check", "some info about the current or passed server.\"\"\" # Check", "member.id in counted_users: counted_users.append(member.id) if not member.status == discord.Status.offline: membersOnline", "bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats: def __init__(self, bot, settings): self.bot", "Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True)", "[] for server in self.bot.guilds: memberCount = 0 for member", "member.bot: bot_member += 1 if not member.status == discord.Status.offline: bot_online", "Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg", "= True else: suppress = False if number > 50:", "> 1: after = \"**{:,}** 
users\".format(total-position) # Build the string!", "users! Just like you wanted!') return joinedList = [] for", "server in serverList: if i > number: break msg +=", "look! No servers! Just like you wanted!') return joinedList =", "else: await ctx.send(\"I'm on *{}* servers. :blush:\".format(count)) return count =", "10): \"\"\"Lists the bottom servers I'm connected to ordered by", "another guild guild = None if guild_name == None: guild", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self, ctx): \"\"\"Lists", "value=str(len(guild.roles)), inline=True) chandesc = \"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\",", "= sorted(joinedList, key=lambda x:x['Joined']) i = 1 msg = ''", "number > 25: number = 25 if number < 1:", "str(guild_name): guild = g break if guild == None: #", "+= 1 userCount += len(server.members) for member in server.members: if", "witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self, ctx): \"\"\"Lists the", "} total = len(joinedList) position = joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join", "= message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if messages == None:", "on all servers I'm connected to.\"\"\" message = await Message.EmbedText(title=\"Counting", "'ID' : g.id, 'Joined' : g.me.joined_at }) popList.append({ 'ID' :", "Don't count your own, Pooter if not message.author.id == self.bot.user.id:", "currently a part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async def joinpos(self,", ": int = 10): \"\"\"Lists the bottom servers I'm connected", "if number < len(joinedList): msg = '__**Last {} of {}", "of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async def joinpos(self, ctx, *, member", "if 
position-1 == 1: # We have previous members before", "= '__**Bottom {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg =", "joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position, total), inline=True)", "the top servers I'm connected to ordered by population -", "from Cogs import DisplayName from Cogs import UserTime from Cogs", "Add the bot and deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings))", "+= 1 continue if not member.status == discord.Status.offline: online_members +=", "member = None): \"\"\"Lists how many servers you share with", "{:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\"", "wanted!') return serverList = [] for server in self.bot.guilds: serverList.append({", "\"Total\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, members+bots,", "if we should allow it - always yes. # This", "I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Servers I", "previous members before = \"**1** user\" elif position-1 > 1:", "users to join - default is 10, max is 25.\"\"\"", "False if number > 50: number = 50 if number", "= 0 bot_online = 0 for member in guild.members: if", "joinedList.index(check_item) + 1 before = \"\" after = \"\" msg", "'ID' : mem.id, 'Joined' : mem.joined_at }) # sort the", "} total = len(popList) position = popList.index(check_item) + 1 server_embed.add_field(name=\"Population", "Check for suppress if suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg)", "for g in self.bot.guilds: if g.name.lower() == guild_name.lower(): guild =", "member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) msg += '{}. 
*{}*", "passed server.\"\"\" # Check if we passed another guild guild", "1 # Check for suppress if suppress: msg = Nullify.clean(msg)", "for mem in guild.members: if mem.id == member.id: count +=", "sever so far. (only applies after this module's inception, and", "\"**{:,}** users\".format(total-position) # Build the string! if len(before) and len(after):", "after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self, ctx, number :", "of {:,}\".format(position, total), inline=True) emojitext = \"\" emojicount = 0", "server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" + guild.owner.discriminator,", "member.id in counted_bots: counted_bots.append(member.id) if not member.status == discord.Status.offline: botsOnline", "= 0 counted_users = [] counted_bots = [] for server", "messages I've seen on all severs so far. (only applies", "ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self, ctx): \"\"\"Lists the total number", ": member.id, 'Joined' : member.joined_at }) # sort the users", "emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url)", "({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\" :", "- *({} members)*\\n'.format(i, member['Name'], time_str, member['Members']) i += 1 if", "on this sever so far. 
(only applies after this module's", "if number > 50: number = 50 if number <", "the users by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'],", "count)) @commands.command(pass_context=True) async def listservers(self, ctx, number : int =", "inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) #", "10): \"\"\"Lists the first users to join - default is", "look! No users! Just like you wanted!') return joinedList =", "counted_users: counted_users.append(member.id) await message.edit(content='There are *{:,} users* (*{:,}* unique) on", "elif len(before): # Just got before msg += \"\\n\\n{} joined", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self,", "server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region,", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self, ctx,", "server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc =", "msg = '__**Servers I\\'m On:**__\\n\\n' for server in self.bot.guilds: if", "else: await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages across all", "# sort the users by join date joinedList = sorted(joinedList,", "targ = \"You share\" else: targ = \"*{}* shares\".format(DisplayName.name(member)) if", "{}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created at {}\".format(time_str) online_members = 0", "servers with me. 
:blush:\".format(targ, count)) @commands.command(pass_context=True) async def listservers(self, ctx,", ") b_string = \"bot\" if bot_member == 1 else \"bots\"", ": guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) }) # sort", "server['Name'], server['Users']) i += 1 if number < len(serverList): msg", "return serverList = [] for server in self.bot.guilds: memberCount =", "inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\",", "self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) msg += '{}.", "total = len(joinedList) position = joinedList.index(check_item) + 1 before =", "= '__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress", "= UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) if", "by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) popList =", "local_time['zone']) if member['Members'] == 1: msg += '{}. *{}* -", "== 0: emojiname = \"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname =", "servers += 1 for member in server.members: if member.bot: bots", "connected to - default is 10, max is 50.\"\"\" #", "len(popList) position = popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of", "else self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp) messages -= 1 if", "else \"bots\" user_string += \"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online, bot_member,", "= int(self.settings.getServerStat(server, \"TotalMessages\")) if messages == None: messages = 0", "need to cancel messages. 
# Don't count your own, Pooter", "is 10, max is 50.\"\"\" # Check if we're suppressing", "locally -1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild,", "applies after this module's inception, and if I'm online)\"\"\" messages", "key=lambda x:int(x['Users'])) if number > len(serverList): number = len(serverList) i", "({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100,", "joinpos(self, ctx, *, member = None): \"\"\"Tells when a user", "far. (only applies after this module's inception, and if I'm", "= emojitext + emojiMention if len(test) > 1024: # TOOO", "in server.members: if member.bot: bots += 1 if not member.id", "= sorted(serverList, key=lambda x:int(x['Users'])) if number > len(serverList): number =", "wanted!') return joinedList = [] for guild in self.bot.guilds: botmember", "< len(serverList): msg = '__**Bottom {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg", "msg += '{}. 
*{}* - *{}* - *(1 member)*\\n'.format(i, member['Name'],", "if not member.status == discord.Status.offline: membersOnline += 1 await Message.Embed(", "\"inline\" : False }, { \"name\" : \"Users\", \"value\" :", "@commands.command(pass_context=True) async def users(self, ctx): \"\"\"Lists the total number of", "check_item = { \"ID\" : member.id, \"Joined\" : member.joined_at }", "*{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self, ctx): \"\"\"Lists the number", "ctx, number : int = 10): \"\"\"Lists the most recent", "guild.id, \"Population\" : len(guild.members) } total = len(popList) position =", "server_embed.description = \"Created at {}\".format(time_str) online_members = 0 bot_member =", "guild.name # Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings,", "{}\".format(local_time['time'], local_time['zone']) msg += '{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)),", ": memberCount }) # sort the servers by population serverList", "member_check = DisplayName.memberForName(member, ctx.guild) if not member_check: msg = \"I", "ctx, number : int = 10): \"\"\"Lists the servers I'm", "> len(serverList): number = len(serverList) i = 1 msg =", "msg += \"\\n\\n{} joined before.\".format(before) elif len(after): # Just after", "emojitext + emojiMention if len(test) > 1024: # TOOO BIIIIIIIIG", "= ctx.author if type(member) is str: member_check = DisplayName.memberForName(member, ctx.guild)", "member == None: member = ctx.author if type(member) is str:", "value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc = \"{:,} text, {:,}", "async def messages(self, ctx): \"\"\"Lists the number of messages I've", "we passed another guild guild = None if guild_name ==", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self, ctx, number : int", 
"async def recentjoins(self, ctx, number : int = 10): \"\"\"Lists", "msg = \"*{}'s* join position is **{:,}**.\".format(DisplayName.name(member), position, total) if", "emojitext = emojitext + emojiMention if len(emojitext): if emojicount ==", "0 for member in guild.members: if member.bot: bot_member += 1", "messages = 0 for guild in self.bot.guilds: temp = 0", "self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) if member['Members'] ==", "server with me. :blush:\".format(targ)) else: await ctx.send(\"{} *{}* servers with", "users by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)", "1 if emojicount == 1: ename = \"Emojis ({:,} total)\".format(len(guild.emojis))", "*{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i += 1 if number <", "our join position this server is joinedList = [] popList", "> 1: before = \"**{:,}** users\".format(position-1) if total-position == 1:", "message = await ctx.send(\"Counting users...\") for server in self.bot.guilds: serverCount", "len(after): # Got both msg += \"\\n\\n{} joined before, and", "i += 1 # Check for suppress if suppress: msg", "len(serverList) i = 1 msg = '' for server in", "TOOO BIIIIIIIIG emojicount += 1 if emojicount == 1: ename", "guild_name.lower(): guild = g break if str(g.id) == str(guild_name): guild", "total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc = \"{:,} text,", "= len(popList) position = popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,}", "messages = 0 if messages == 1: await ctx.channel.send('So far,", "\"You share\" else: targ = \"*{}* shares\".format(DisplayName.name(member)) if count ==", "if number < len(joinedList): msg = '__**First {} of {}", "x:x['Population'], reverse=True) check_item = { \"ID\" : guild.id, 
\"Joined\" :", "member in ctx.message.guild.members: joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at", "itemgetter from discord.ext import commands from Cogs import Nullify from", "I'm connected to.\"\"\" message = await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers", "2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent),", "firstjoins(self, ctx, number : int = 10): \"\"\"Lists the first", "def message(self, message): # Check the message and see if", ": len(server.members) }) # sort the servers by population serverList", "= '' for member in joinedList: if i > number:", "settings)) class ServerStats: def __init__(self, bot, settings): self.bot = bot", "self.bot = bot self.settings = settings async def message(self, message):", "in self.bot.guilds: servers += 1 for member in server.members: if", "*({} members)*\\n'.format(i, member['Name'], time_str, member['Members']) i += 1 if number", "break msg += '{}. *{}*\\n'.format(i, server.name) i += 1 #", "date joinedList = sorted(joinedList, key=lambda x:x['Joined']) popList = sorted(popList, key=lambda", "mem in guild.members: if mem.id == member.id: count += 1", "# sort the servers by join date joinedList = sorted(joinedList,", "0 messages += 1 self.settings.setServerStat(server, \"TotalMessages\", messages) return { 'Ignore'", "= True else: suppress = False if member == None:", "'__**Bottom {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Bottom", ": member.joined_at } total = len(joinedList) position = joinedList.index(check_item) +", "emojitext + emojiMention if len(emojitext): if emojicount == 0: emojiname", "seen on this sever so far. 
(only applies after this", "recentservers(self, ctx, number : int = 10): \"\"\"Lists the most", "= \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext +", "await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self, ctx, *, member =", "\"ID\" : guild.id, \"Joined\" : guild.me.joined_at } total = len(joinedList)", "ServerStats: def __init__(self, bot, settings): self.bot = bot self.settings =", "10, max is 50.\"\"\" # Check if we're suppressing @here", ": False}, { \"name\" : \"Bots\", \"value\" : \"└─ {:,}/{:,}", "None else self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp) messages -= 1", "count = len(self.bot.guilds) if count == 1: await ctx.send(\"I'm on", "= sorted(joinedList, key=lambda x:x['Joined']) popList = sorted(popList, key=lambda x:x['Population'], reverse=True)", "serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if number > len(serverList):", "is None else self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp) messages -=", "online ({:,g}%)\".format( online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members) - bot_member) *", "{} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg #", "online)\"\"\" messages = 0 for guild in self.bot.guilds: temp =", "ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self, ctx, *,", "for guild in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name'", "else: msg = '__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check", "This module doesn't need to cancel messages. 
# Don't count", "async def joinpos(self, ctx, *, member = None): \"\"\"Tells when", "\"inline\" : False}, { \"name\" : \"Total\", \"value\" : \"└─", "serverinfo(self, ctx, *, guild_name = None): \"\"\"Lists some info about", "before = \"\" after = \"\" msg = \"*{}'s* join", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self,", "UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) msg +=", "int = 10): \"\"\"Lists the first users to join -", "joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at }) popList.append({ 'ID'", "\"Servers\", \"value\" : \"└─ {:,}\".format(servers), \"inline\" : False }, {", "len(joinedList) position = joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join Position\", value=\"{:,} of", "break if guild == None: # We didn't find it", "number < 1: await ctx.channel.send('Oookay - look! No servers! Just", "server['Users']) i += 1 if number < len(serverList): msg =", "if i > number: break # Get localized user time", "message) '''userCount = 0 serverCount = 0 counted_users = []", "x:x['Joined'], reverse=True) i = 1 msg = '' for member", "{}\".format(time_str) online_members = 0 bot_member = 0 bot_online = 0", "the bottom servers I'm connected to ordered by population -", "1: before = \"**{:,}** users\".format(position-1) if total-position == 1: #", "\"Users\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique", "msg = '__**Top {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg", "\"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext + emojiMention", "online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members) - bot_member) * 100), 2)", "else: await ctx.send(\"{} *{}* servers with me. 
:blush:\".format(targ, count)) @commands.command(pass_context=True)", "from discord.ext import commands from Cogs import Nullify from Cogs", "[] for guild in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({", "== discord.Status.offline: bot_online += 1 continue if not member.status ==", "the first users to join - default is 10, max", "serverCount += 1 userCount += len(server.members) for member in server.members:", "x:int(x['Users'])) if number > len(serverList): number = len(serverList) i =", "= \"bot\" if bot_member == 1 else \"bots\" user_string +=", "server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position, total), inline=True) # Get our", "\"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members) - bot_member)", "guild = ctx.guild else: for g in self.bot.guilds: if g.name.lower()", "len(joinedList))+msg else: msg = '__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg #", "+ emojiMention if len(emojitext): if emojicount == 0: emojiname =", "i > number: break # Get localized user time local_time", "len(counted_users), serverCount))''' @commands.command(pass_context=True) async def joinpos(self, ctx, *, member =", "else: emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url):", "total)\".format(len(guild.emojis)) else: ename = \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention", "await ctx.channel.send('So far, I\\'ve witnessed *{:,} message across all servers!*'.format(messages))", "if member['Members'] == 1: msg += '{}. 
*{}* - *{}*", "value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\"", "len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)),", "= sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i = 1 msg =", "user\" elif total-position > 1: after = \"**{:,}** users\".format(total-position) #", "= \"I couldn't find *{}* on this server...\".format(member) if suppress:", "0 bot_member = 0 bot_online = 0 for member in", "50: number = 50 if number < 1: await ctx.channel.send('Oookay", "+= 1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online", "\"\"\"Lists how many servers you share with the bot.\"\"\" #", "number < 1: await ctx.channel.send('Oookay - look! No users! 
Just", "= 25 if number < 1: await ctx.channel.send('Oookay - look!", "serverList = [] for server in self.bot.guilds: serverList.append({ 'Name' :", "guild.members: if mem.id == member.id: count += 1 if ctx.author.id", "is joinedList = [] popList = [] for g in", "Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg", "localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str =", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self, ctx):", "servers = members = membersOnline = bots = botsOnline =", "None: # We didn't find it await ctx.send(\"I couldn't find", "({:,g}%)\".format( online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members) - bot_member) * 100),", "No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async", "= 50 if number < 1: await ctx.channel.send('Oookay - look!", "# This module doesn't need to cancel messages. 
# Don't", "by population serverList = sorted(serverList, key=lambda x:int(x['Users'])) if number >", "= \"You share\" else: targ = \"*{}* shares\".format(DisplayName.name(member)) if count", "def firstjoins(self, ctx, number : int = 10): \"\"\"Lists the", "+= \"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2)", "you wanted!') return serverList = [] for server in self.bot.guilds:", "listservers(self, ctx, number : int = 10): \"\"\"Lists the servers", "description=\"Current User Information\".format(server.name), fields=[ { \"name\" : \"Servers\", \"value\" :", "users...\") for server in self.bot.guilds: serverCount += 1 userCount +=", "{} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Bottom {}", "else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext + emojiMention if", "ctx.author if type(member) is str: member_check = DisplayName.memberForName(member, ctx.guild) if", "users...\", color=ctx.message.author).send(ctx) servers = members = membersOnline = bots =", "def setup(bot): # Add the bot and deps settings =", "deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats: def __init__(self,", "member in server.members: memberCount += 1 serverList.append({ 'Name' : server.name,", "member.id: targ = \"You share\" else: targ = \"*{}* shares\".format(DisplayName.name(member))", "'__**Top {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Top", "async def topservers(self, ctx, number : int = 10): \"\"\"Lists", "No users! 
Just like you wanted!') return joinedList = []", "member in joinedList: if i > number: break # Get", "\"name\" : \"Users\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) -", "len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name", "messages += 1 self.settings.setServerStat(server, \"TotalMessages\", messages) return { 'Ignore' :", "ctx, number : int = 10): \"\"\"Lists the bottom servers", "len(before) and len(after): # Got both msg += \"\\n\\n{} joined", "= botsOnline = 0 counted_users = [] counted_bots = []", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self, ctx): \"\"\"Lists the total", "else: ename = \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else:", "len(serverList))+msg else: msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for", "'__**First {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg", "server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name +", "count == 1: await ctx.send(\"{} *1* server with me. :blush:\".format(targ))", "join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) i = 1", "await message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,}", "len(guild.members) }) # sort the servers by join date joinedList", "round((len(counted_users)/members)*100, 2)), \"inline\" : False}, { \"name\" : \"Bots\", \"value\"", "\"\"\"Lists the servers I'm connected to - default is 10,", "bot self.settings = settings async def message(self, message): # Check", "server. :blush:\") else: await ctx.send(\"I'm on *{}* servers. 
:blush:\".format(count)) return", "number < len(serverList): msg = '__**Bottom {} of {} Servers:**__\\n\\n'.format(number,", "members before = \"**1** user\" elif position-1 > 1: before", "= 10): \"\"\"Lists the first servers I've joined - default", "the most recent users to join - default is 10,", "import datetime from operator import itemgetter from discord.ext import commands", "None): \"\"\"Lists some info about the current or passed server.\"\"\"", "the bot and deps settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class", "if member.bot: bot_member += 1 if not member.status == discord.Status.offline:", "counted_bots: counted_bots.append(member.id) if not member.status == discord.Status.offline: botsOnline += 1", "+= \"\\n\\n{} joined before, and {} after.\".format(before, after) elif len(before):", "we should allow it - always yes. # This module", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self, ctx, number : int", "+= '{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i +=", "1 if not member.status == discord.Status.offline: bot_online += 1 continue", "of users on all servers I'm connected to.\"\"\" message =", "1: await ctx.channel.send('Oookay - look! No servers! Just like you", "if we're suppressing @here and @everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"):", "await ctx.channel.send('Oookay - look! No users! 
Just like you wanted!')", "emojicount == 0: emojiname = \"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname", ": g.me.joined_at }) popList.append({ 'ID' : g.id, 'Population' : len(g.members)", "= \"*{}* shares\".format(DisplayName.name(member)) if count == 1: await ctx.send(\"{} *1*", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self, ctx): \"\"\"Lists", "far, I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self, ctx):", "discord.ext import commands from Cogs import Nullify from Cogs import", "@commands.command(pass_context=True) async def allmessages(self, ctx): \"\"\"Lists the number of messages", "\"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext,", "server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else: emojitext = emojitext + emojiMention", "guild in self.bot.guilds: for mem in guild.members: if mem.id ==", "await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def", "1 if ctx.author.id == member.id: targ = \"You share\" else:", "\"TotalMessages\", messages) return { 'Ignore' : False, 'Delete' : False}", ":blush:\".format(targ)) else: await ctx.send(\"{} *{}* servers with me. :blush:\".format(targ, count))", "= 10): \"\"\"Lists the first users to join - default", "and if I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -=", "number of messages I've seen on all severs so far.", "await ctx.send(\"Counting users...\") for server in self.bot.guilds: serverCount += 1", "+= '{}. *{}* - *{:,}* members\\n'.format(i, server['Name'], server['Users']) i +=", "always yes. 
# This module doesn't need to cancel messages.", "\"\"\"Lists the first users to join - default is 10,", "date joinedList = sorted(joinedList, key=lambda x:x['Joined']) i = 1 msg", "sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i = 1 msg = ''", "value=guild.large, inline=True) # Find out where in our join position", "{ \"name\" : \"Total\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline", "+= 1 # Check for suppress if suppress: msg =", "# Just got before msg += \"\\n\\n{} joined before.\".format(before) elif", "I'm connected to - default is 10, max is 50.\"\"\"", "None: member = ctx.author if type(member) is str: member_check =", "+ 1 server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position, total), inline=True) #", ": \"└─ {:,}\".format(servers), \"inline\" : False }, { \"name\" :", "if messages == None: messages = 0 messages += 1", "str(g.id) == str(guild_name): guild = g break if guild ==", "- bot_member) * 100), 2) ) b_string = \"bot\" if", "\"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename = \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext,", "None): \"\"\"Lists how many servers you share with the bot.\"\"\"", "ctx.send(\"I couldn't find that guild...\") return server_embed = discord.Embed(color=ctx.author.color) server_embed.title", "ctx.message.guild.members: joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at }) #", "else: msg += '{}. 
*{}* - *{}* - *({} members)*\\n'.format(i,", "from Cogs import UserTime from Cogs import Message def setup(bot):", "membersOnline = bots = botsOnline = 0 counted_users = []", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self, ctx, number :", "Nullify.clean(msg) await ctx.send(msg) return member = member_check if member.id ==", "targ = \"*{}* shares\".format(DisplayName.name(member)) if count == 1: await ctx.send(\"{}", "online ({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\"", "in counted_users: counted_users.append(member.id) if not member.status == discord.Status.offline: membersOnline +=", "return joinedList = [] for guild in self.bot.guilds: botmember =", "# There were users after as well after = \"**1**", "UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) if member['Members']", "Cogs import Nullify from Cogs import DisplayName from Cogs import", "member in guild.members: if member.bot: bot_member += 1 if not", ": \"Bots\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,}", "the servers by population serverList = sorted(serverList, key=lambda x:int(x['Users'])) if", "Rank\", value=\"{:,} of {:,}\".format(position, total), inline=True) emojitext = \"\" emojicount", "await ctx.channel.send('So far, I\\'ve witnessed *{:,} message!*'.format(messages)) else: await ctx.channel.send('So", ": False} @commands.command(pass_context=True) async def serverinfo(self, ctx, *, guild_name =", "server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position, total), inline=True) emojitext = \"\"", "async def users(self, ctx): \"\"\"Lists the total number of users", "not member.id in counted_bots: counted_bots.append(member.id) if not member.status == discord.Status.offline:", "1 userCount += len(server.members) for member in 
server.members: if not", "*1* server with me. :blush:\".format(targ)) else: await ctx.send(\"{} *{}* servers", "joined before.\".format(before) elif len(after): # Just after msg += \"\\n\\n{}", "DisplayName.memberForName(member, ctx.guild) if not member_check: msg = \"I couldn't find", "member['Name'], time_str, member['Members']) i += 1 if number < len(joinedList):", "if total-position == 1: # There were users after as", "server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) # Find", "settings async def message(self, message): # Check the message and", "in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' : guild.name,", "messages += int(temp) messages -= 1 if messages == 1:", "'__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self, ctx, number : int", "25.\"\"\" # Check if we're suppressing @here and @everyone mentions", "messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages == None:", "msg = '__**Last {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg", "that guild...\") return server_embed = discord.Embed(color=ctx.author.color) server_embed.title = guild.name #", "= await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers = members = membersOnline", "Check if we passed another guild guild = None if", "'__**Servers I\\'m On:**__\\n\\n' for server in self.bot.guilds: if i >", "we're suppressing @here and @everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress", "in self.bot.guilds: if i > number: break msg += '{}.", "= \"*{}'s* join position is **{:,}**.\".format(DisplayName.name(member), position, total) if position-1", 
"= 0 bot_member = 0 bot_online = 0 for member", "\"ID\" : guild.id, \"Population\" : len(guild.members) } total = len(popList)", ": False} ], color=ctx.message.author).edit(ctx, message) '''userCount = 0 serverCount =", "allmessages(self, ctx): \"\"\"Lists the number of messages I've seen on", "1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online ({:,g}%)\".format(", "break # Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings,", "= { \"ID\" : guild.id, \"Joined\" : guild.me.joined_at } total", "1 if not member.id in counted_bots: counted_bots.append(member.id) if not member.status", "in ctx.message.guild.members: joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at })", "\"bots\" user_string += \"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online, bot_member, b_string,", "\"**1** user\" elif total-position > 1: after = \"**{:,}** users\".format(total-position)", "\"\"\"Lists the first servers I've joined - default is 10,", ":blush:\".format(targ, count)) @commands.command(pass_context=True) async def listservers(self, ctx, number : int", "ordered by population - default is 10, max is 50.\"\"\"", "- look! No servers! Just like you wanted!') return joinedList", "# Check for suppress if suppress: msg = Nullify.clean(msg) await", "in guild.emojis: if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention =", "if not message.author.id == self.bot.user.id: server = message.guild messages =", "the string! 
if len(before) and len(after): # Got both msg", "@commands.command(pass_context=True) async def messages(self, ctx): \"\"\"Lists the number of messages", "if mem.id == member.id: count += 1 if ctx.author.id ==", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self, ctx,", "for member in joinedList: if i > number: break #", "self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' : guild.name, 'Joined'", "ctx.channel.send('Oookay - look! No users! Just like you wanted!') return", "= \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members) -", "\"I couldn't find *{}* on this server...\".format(member) if suppress: msg", "< 1: await ctx.channel.send('Oookay - look! No servers! Just like", "population serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if number >", "ctx.channel.send('So far, I\\'ve witnessed *{:,} messages across all servers!*'.format(messages)) #", "users.\"\"\" # Check if we're suppressing @here and @everyone mentions", "users by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) check_item", "guild = g break if str(g.id) == str(guild_name): guild =", "ctx.send(\"Counting users...\") for server in self.bot.guilds: serverCount += 1 userCount", "DisplayName from Cogs import UserTime from Cogs import Message def", "def __init__(self, bot, settings): self.bot = bot self.settings = settings", "== 1: ename = \"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename =", "DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i += 1 if number < len(joinedList):", "text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role,", 
"position check_item = { \"ID\" : guild.id, \"Population\" : len(guild.members)", "@everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True else: suppress", "= guild.name # Get localized user time local_time = UserTime.getUserTime(ctx.author,", "== None: member = ctx.author if type(member) is str: member_check", "for guild in self.bot.guilds: for mem in guild.members: if mem.id", "ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self, ctx, number : int =", "in joinedList: if i > number: break # Get localized", "return count = 0 for guild in self.bot.guilds: for mem", "# Just after msg += \"\\n\\n{} joined after.\".format(after) await ctx.send(msg)", "\"\"\"Lists the top servers I'm connected to ordered by population", "= True else: suppress = False if number > 25:", "self.bot.user.id: count = len(self.bot.guilds) if count == 1: await ctx.send(\"I'm", "\"ID\" : member.id, \"Joined\" : member.joined_at } total = len(joinedList)", "async def sharedservers(self, ctx, *, member = None): \"\"\"Lists how", "1 msg = '__**Servers I\\'m On:**__\\n\\n' for server in self.bot.guilds:", "if guild == None: # We didn't find it await", "None): \"\"\"Tells when a user joined compared to other users.\"\"\"", "of messages I've seen on this sever so far. 
(only", "share\" else: targ = \"*{}* shares\".format(DisplayName.name(member)) if count == 1:", "Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg", "\"name\" : \"Servers\", \"value\" : \"└─ {:,}\".format(servers), \"inline\" : False", "return member = member_check if member.id == self.bot.user.id: count =", "all servers!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages", "joinedList: if i > number: break # Get localized user", "ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self, ctx, *, member = None):", ": False, 'Delete' : False} @commands.command(pass_context=True) async def serverinfo(self, ctx,", "[] for g in self.bot.guilds: joinedList.append({ 'ID' : g.id, 'Joined'", "True else: suppress = False if number > 50: number", ": int = 10): \"\"\"Lists the top servers I'm connected", "in counted_users: counted_users.append(member.id) await message.edit(content='There are *{:,} users* (*{:,}* unique)", "messages -= 1 if messages == 1: await ctx.channel.send('So far,", "reverse=True) i = 1 msg = '' for member in", "server = message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if messages ==", "ctx.send(msg) return member = member_check if member.id == self.bot.user.id: count", "popList.append({ 'ID' : g.id, 'Population' : len(g.members) }) # sort", "settings): self.bot = bot self.settings = settings async def message(self,", "*{}* - *({} members)*\\n'.format(i, member['Name'], time_str, member['Members']) i += 1", "key=lambda x:x['Population'], reverse=True) check_item = { \"ID\" : guild.id, \"Joined\"", "int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages", "for member in ctx.message.guild.members: joinedList.append({ 'ID' : member.id, 
'Joined' :", "*1* server. :blush:\") else: await ctx.send(\"I'm on *{}* servers. :blush:\".format(count))", "}) # sort the servers by population serverList = sorted(serverList,", "({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" : False},", "it await ctx.send(\"I couldn't find that guild...\") return server_embed =", "from Cogs import Message def setup(bot): # Add the bot", "by population serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if number", "# bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online ({:,g}%)\".format( online_members,", "discord.Status.offline: online_members += 1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string =", "servers I'm connected to ordered by population - default is", "\"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline,", "by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) check_item =", "if i > number: break msg += '{}. *{}* -", "len(emojitext): if emojicount == 0: emojiname = \"Emojis ({} total)\".format(len(guild.emojis))", "g.id, 'Joined' : g.me.joined_at }) popList.append({ 'ID' : g.id, 'Population'", "user time local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str = \"{}", "len(joinedList): msg = '__**Last {} of {} Servers I Joined:**__\\n\\n'.format(number,", "= 0 if self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\")", "key=lambda x:x['Joined']) check_item = { \"ID\" : member.id, \"Joined\" :", "joined - default is 10, max is 25.\"\"\" # Check", "= \"**{:,}** users\".format(total-position) # Build the string! 
if len(before) and", "online ({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots),", "allow it - always yes. # This module doesn't need", "I'm connected to ordered by population - default is 10,", "+= 1 if not member.status == discord.Status.offline: bot_online += 1", "{} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg #", "elif len(after): # Just after msg += \"\\n\\n{} joined after.\".format(after)", "joinedList = [] for guild in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id,", "+= '{}. *{}* - *{}* - *(1 member)*\\n'.format(i, member['Name'], time_str)", "before = \"**1** user\" elif position-1 > 1: before =", "else: msg = '__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check", "botmember.joined_at, 'Members': len(guild.members) }) # sort the servers by join", "self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp)", "\"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100,", "of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last", "else: targ = \"*{}* shares\".format(DisplayName.name(member)) if count == 1: await", "Get our population position check_item = { \"ID\" : guild.id,", "\"{} {}\".format(local_time['time'], local_time['zone']) if member['Members'] == 1: msg += '{}.", "messages across all servers!*'.format(messages)) # Set our message count locally", "= sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if number > len(serverList): number", "+= 1 if emojicount == 1: ename = \"Emojis ({:,}", "self.bot.guilds: if i > number: break msg += '{}. 
*{}*\\n'.format(i,", "if g.name.lower() == guild_name.lower(): guild = g break if str(g.id)", "from datetime import datetime from operator import itemgetter from discord.ext", "localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str =", "'Joined' : member.joined_at }) # sort the users by join", "number : int = 10): \"\"\"Lists the servers I'm connected", "self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages == None: messages = 0", "bottom servers I'm connected to ordered by population - default", "the bot.\"\"\" # Check if we're suppressing @here and @everyone", "I\\'ve witnessed *{:,} message!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed", "if str(g.id) == str(guild_name): guild = g break if guild", "\"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100,", "joinedList = sorted(joinedList, key=lambda x:x['Joined']) check_item = { \"ID\" :", "\"\\n\\n{} joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self, ctx,", "*{:,} messages across all servers!*'.format(messages)) # Set our message count", "joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at }) # sort", "if len(emojitext): if emojicount == 0: emojiname = \"Emojis ({}", "await ctx.channel.send('Oookay - look! No servers! 
Just like you wanted!')", "else: suppress = False if number > 50: number =", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self, ctx, number", "+= 1 if number < len(serverList): msg = '__**Bottom {}", "number : int = 10): \"\"\"Lists the bottom servers I'm", "'__**First {} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg", "inline=True) # Get our population position check_item = { \"ID\"", "server is joinedList = [] popList = [] for g", "import UserTime from Cogs import Message def setup(bot): # Add", "inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc = \"{:,} text, {:,} voice\".format(len(guild.text_channels),", ":blush:\") else: await ctx.send(\"I'm on *{}* servers. :blush:\".format(count)) return count", "chandesc = \"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True)", "far, I\\'ve witnessed *{:,} message!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve", "doesn't need to cancel messages. # Don't count your own,", "1 server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position, total), inline=True) # Get", "color=ctx.message.author).send(ctx) servers = members = membersOnline = bots = botsOnline", "serverCount))''' @commands.command(pass_context=True) async def joinpos(self, ctx, *, member = None):", "= \"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default", "look! No servers! 
Just like you wanted!') return serverList =", "}) # sort the users by join date joinedList =", "guild.emojis: if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\"", "No servers! Just like you wanted!') return i = 1", "\"\"\"Lists the bottom servers I'm connected to ordered by population", "= False if member == None: member = ctx.author if", "== 1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message across", ": False }, { \"name\" : \"Users\", \"value\" : \"└─", "position is **{:,}**.\".format(DisplayName.name(member), position, total) if position-1 == 1: #", "= \"**1** user\" elif total-position > 1: after = \"**{:,}**", "own, Pooter if not message.author.id == self.bot.user.id: server = message.guild", "members = membersOnline = bots = botsOnline = 0 counted_users", "def bottomservers(self, ctx, number : int = 10): \"\"\"Lists the", "= \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else:", "g.me.joined_at }) popList.append({ 'ID' : g.id, 'Population' : len(g.members) })", "{ \"ID\" : member.id, \"Joined\" : member.joined_at } total =", "+ guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True)", "counted_bots.append(member.id) if not member.status == discord.Status.offline: botsOnline += 1 else:", "\"└─ {:,}\".format(servers), \"inline\" : False }, { \"name\" : \"Users\",", "len(server.members) }) # sort the servers by population serverList =", "suppress if suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async", "\"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) 
server_embed.add_field(name=\"Verification\", value=guild.verification_level,", "after = \"**1** user\" elif total-position > 1: after =", "messages == None: messages = 0 if messages == 1:", "members\\n'.format(i, server['Name'], server['Users']) i += 1 if number < len(serverList):", "to cancel messages. # Don't count your own, Pooter if", "+= 1 else: members += 1 if not member.id in", "1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message!*'.format(messages)) else: await", "0: emojiname = \"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname = \"Emojis", "share with the bot.\"\"\" # Check if we're suppressing @here", "else: msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress", "len(self.bot.guilds) if count == 1: await ctx.send(\"I'm on *1* server.", "- *{:,}* members\\n'.format(i, server['Name'], server['Users']) i += 1 if number", "sort the servers by population serverList = sorted(serverList, key=lambda x:int(x['Users']),", "[] counted_bots = [] for server in self.bot.guilds: servers +=", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self, ctx, number : int", "total) if position-1 == 1: # We have previous members", "= None): \"\"\"Lists some info about the current or passed", "and @everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True else:", "ctx.author.id == member.id: targ = \"You share\" else: targ =", "msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if", "both msg += \"\\n\\n{} joined before, and {} after.\".format(before, after)", "}) # sort the guilds by join date joinedList =", "time local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str = \"{} {}\".format(local_time['time'],", "server_embed = discord.Embed(color=ctx.author.color) server_embed.title = guild.name # Get localized user", "servers!*'.format(messages)) else: await 
ctx.channel.send('So far, I\\'ve witnessed *{:,} messages across", "False if member == None: member = ctx.author if type(member)", "self.bot.user.id: server = message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if messages", "+= 1 if number < len(serverList): msg = '__**Top {}", "len(joinedList): msg = '__**First {} of {} Servers I Joined:**__\\n\\n'.format(number,", "return i = 1 msg = '__**Servers I\\'m On:**__\\n\\n' for", "else: msg = '__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check", "\"\" after = \"\" msg = \"*{}'s* join position is", "g break if str(g.id) == str(guild_name): guild = g break", "messages(self, ctx): \"\"\"Lists the number of messages I've seen on", "We have previous members before = \"**1** user\" elif position-1", "== member.id: count += 1 if ctx.author.id == member.id: targ", "0 counted_users = [] counted_bots = [] for server in", "server.members: if member.bot: bots += 1 if not member.id in", "bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,}", "len(serverList): msg = '__**Bottom {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else:", "Check the message and see if we should allow it", "= [] for mem in ctx.message.guild.members: joinedList.append({ 'ID' : mem.id,", "{:,}\".format(servers), \"inline\" : False }, { \"name\" : \"Users\", \"value\"", "total number of users on all servers I'm connected to.\"\"\"", "Pooter if not message.author.id == self.bot.user.id: server = message.guild messages", "== None: guild = ctx.guild else: for g in self.bot.guilds:", "{} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress:", "10): \"\"\"Lists the servers I'm connected to - default is", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self, ctx, number :", "str: member_check 
= DisplayName.memberForName(member, ctx.guild) if not member_check: msg =", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self, ctx): \"\"\"Lists the number", "the servers by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'])", "+= \"\\n\\n{} joined before.\".format(before) elif len(after): # Just after msg", "if number < len(serverList): msg = '__**Bottom {} of {}", "1 serverList.append({ 'Name' : server.name, 'Users' : memberCount }) #", "msg += '{}. *{}* - *{:,}* members\\n'.format(i, server['Name'], server['Users']) i", "reverse=True) if number > len(serverList): number = len(serverList) i =", "if I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1", "for member in guild.members: if member.bot: bot_member += 1 if", "asyncio import discord from datetime import datetime from operator import", "await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages across all servers!*'.format(messages))", "local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str = \"{} {}\".format(local_time['time'], local_time['zone'])", "'__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if", "number < len(serverList): msg = '__**Top {} of {} Servers:**__\\n\\n'.format(number,", "joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at }) # sort", "members += 1 if not member.id in counted_users: counted_users.append(member.id) if", "== self.bot.user.id: server = message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if", "{} after.\".format(before, after) elif len(before): # Just got before msg", "join position this server is joinedList = [] popList =", "Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str", "1 msg = '' for server in serverList: if i", "= [] for g in self.bot.guilds: joinedList.append({ 'ID' : 
g.id,", "title=\"Member Stats\", description=\"Current User Information\".format(server.name), fields=[ { \"name\" : \"Servers\",", "of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First", "sort the guilds by join date joinedList = sorted(joinedList, key=lambda", "time_str, member['Members']) i += 1 if number < len(joinedList): msg", "2)), \"inline\" : False}, { \"name\" : \"Bots\", \"value\" :", "len(guild.members) } total = len(popList) position = popList.index(check_item) + 1", "Just after msg += \"\\n\\n{} joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True)", "value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) # Find out where", "[] for server in self.bot.guilds: serverList.append({ 'Name' : server.name, 'Users'", "Check if we're suppressing @here and @everyone mentions if self.settings.getServerStat(ctx.message.guild,", "0 if messages == 1: await ctx.channel.send('So far, I\\'ve witnessed", "compared to other users.\"\"\" # Check if we're suppressing @here", "datetime import datetime from operator import itemgetter from discord.ext import", "far, I\\'ve witnessed *{:,} message across all servers!*'.format(messages)) else: await", "\"**1** user\" elif position-1 > 1: before = \"**{:,}** users\".format(position-1)", "emojicount == 1: ename = \"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename", "inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server", "join - default is 10, max is 25.\"\"\" # Check", "}) # sort the servers by join date joinedList =", "None: guild = ctx.guild else: for g in self.bot.guilds: if", "message and see if we should allow it - always", "+= 1 if not member.id in counted_users: counted_users.append(member.id) if not", 
"@commands.command(pass_context=True) async def recentjoins(self, ctx, number : int = 10):", "of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg", "total-position == 1: # There were users after as well", "Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Servers", "Set our message count locally -1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\"))", "self.bot.guilds: for mem in guild.members: if mem.id == member.id: count", "member.joined_at } total = len(joinedList) position = joinedList.index(check_item) + 1", "online_members = 0 bot_member = 0 bot_online = 0 for", "@commands.command(pass_context=True) async def topservers(self, ctx, number : int = 10):", "\"*{}'s* join position is **{:,}**.\".format(DisplayName.name(member), position, total) if position-1 ==", "Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str", "b_string = \"bot\" if bot_member == 1 else \"bots\" user_string", "sort the servers by population serverList = sorted(serverList, key=lambda x:int(x['Users']))", "+= \"\\n\\n{} joined after.\".format(after) await ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self,", "BIIIIIIIIG emojicount += 1 if emojicount == 1: ename =", "Position\", value=\"{:,} of {:,}\".format(position, total), inline=True) # Get our population", "servers. 
:blush:\".format(count)) return count = 0 for guild in self.bot.guilds:", "inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\",", "userCount += len(server.members) for member in server.members: if not member.id", "True else: suppress = False if number > 25: number", "user time local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{}", "-1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\",", "ctx.send(\"I'm on *{}* servers. :blush:\".format(count)) return count = 0 for", "0 for member in server.members: memberCount += 1 serverList.append({ 'Name'", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self, ctx,", ": mem.joined_at }) # sort the users by join date", "you share with the bot.\"\"\" # Check if we're suppressing", "1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message across all", "other users.\"\"\" # Check if we're suppressing @here and @everyone", "guild in self.bot.guilds: temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\") is", "discord.Status.offline: bot_online += 1 continue if not member.status == discord.Status.offline:", "popList = [] for g in self.bot.guilds: joinedList.append({ 'ID' :", "voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\",", "+ \"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\",", "msg += \"\\n\\n{} joined before, and {} after.\".format(before, after) 
elif", "to ordered by population - default is 10, max is", "#server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members", ": guild.id, \"Population\" : len(guild.members) } total = len(popList) position", "{} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress: msg =", "message!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True)", "= 10): \"\"\"Lists the top servers I'm connected to ordered", "after this module's inception, and if I'm online)\"\"\" messages =", "ctx.guild) if not member_check: msg = \"I couldn't find *{}*", "time_str = \"{} {}\".format(local_time['time'], local_time['zone']) msg += '{}. *{}* -", "False}, { \"name\" : \"Bots\", \"value\" : \"└─ {:,}/{:,} online", "= 10): \"\"\"Lists the bottom servers I'm connected to ordered", "module's inception, and if I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\"))", "to join - default is 10, max is 25.\"\"\" #", "- look! No servers! Just like you wanted!') return i", "1: # We have previous members before = \"**1** user\"", "not member.status == discord.Status.offline: botsOnline += 1 else: members +=", "or passed server.\"\"\" # Check if we passed another guild", "{ \"name\" : \"Servers\", \"value\" : \"└─ {:,}\".format(servers), \"inline\" :", "member.status == discord.Status.offline: botsOnline += 1 else: members += 1", "\"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100,", "member['Name'], time_str) else: msg += '{}. 
*{}* - *{}* -", "emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test =", "\"\"\"Lists the total number of users on all servers I'm", "users on all servers I'm connected to.\"\"\" message = await", "member.joined_at }) # sort the users by join date joinedList", "= 0 for guild in self.bot.guilds: temp = 0 if", "inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\",", "Message.Embed( title=\"Member Stats\", description=\"Current User Information\".format(server.name), fields=[ { \"name\" :", "'__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress: msg", "async def message(self, message): # Check the message and see", "len(serverList): number = len(serverList) i = 1 msg = ''", "> 50: number = 50 if number < 1: await", "else: suppress = False if member == None: member =", "server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel,", "= '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress:", "is 25.\"\"\" # Check if we're suppressing @here and @everyone", "for server in serverList: if i > number: break msg", "= [] for guild in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild)", "joined compared to other users.\"\"\" # Check if we're suppressing", "await ctx.send(msg) @commands.command(pass_context=True) async def firstjoins(self, ctx, number : int", "users\".format(total-position) # Build the string! 
if len(before) and len(after): #", "user_string = \"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members) - bot_member, round((online_members/(len(guild.members)", "+ botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\" : False}", ": mem.id, 'Joined' : mem.joined_at }) # sort the users", "\"\" emojicount = 0 for emoji in guild.emojis: if emoji.animated:", "the guilds by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'])", "i > number: break msg += '{}. *{}*\\n'.format(i, server.name) i", "server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc = \"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels))", "0 for emoji in guild.emojis: if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\"", "'ID' : member.id, 'Joined' : member.joined_at }) # sort the", "= 0 for member in server.members: memberCount += 1 serverList.append({", "= '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress:", "1: after = \"**{:,}** users\".format(total-position) # Build the string! if", "== str(guild_name): guild = g break if guild == None:", "if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test", "if i > number: break msg += '{}. 
*{}*\\n'.format(i, server.name)", "round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members),", "in self.bot.guilds: joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at })", "else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed)", "ctx.channel.send('So far, I\\'ve witnessed *{:,} message!*'.format(messages)) else: await ctx.channel.send('So far,", "await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers = members = membersOnline =", "- default is 10, max is 25.\"\"\" # Check if", "to other users.\"\"\" # Check if we're suppressing @here and", "'ID' : g.id, 'Population' : len(g.members) }) # sort the", "online_members += 1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,}", "servers I'm connected to.\"\"\" message = await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx)", "most recent users to join - default is 10, max", "ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self, ctx): \"\"\"Lists the number of", "max is 25.\"\"\" # Check if we're suppressing @here and", "emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext + emojiMention if len(test)", "max is 50.\"\"\" # Check if we're suppressing @here and", "serverList.append({ 'Name' : server.name, 'Users' : len(server.members) }) # sort", "across all servers!*'.format(messages)) # Set our message count locally -1", "members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\" : False} ], color=ctx.message.author).edit(ctx,", "suppress = False if member == None: member = ctx.author", ": int = 10): \"\"\"Lists the most recent users to", "guild guild = None if 
guild_name == None: guild =", "all servers!*'.format(messages)) # Set our message count locally -1 messages", "Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Members", "before, and {} after.\".format(before, after) elif len(before): # Just got", "to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Members to", "1 else \"bots\" user_string += \"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online,", "if I'm online)\"\"\" messages = 0 for guild in self.bot.guilds:", "module's inception, and if I'm online)\"\"\" messages = 0 for", "messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if messages == None: messages =", "= bot self.settings = settings async def message(self, message): #", "time local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined']) time_str = \"{} {}\".format(local_time['time'],", "- *{}* - *({} members)*\\n'.format(i, member['Name'], time_str, member['Members']) i +=", "= False if number > 50: number = 50 if", "'Name' : server.name, 'Users' : memberCount }) # sort the", "if not member_check: msg = \"I couldn't find *{}* on", "number : int = 10): \"\"\"Lists the first users to", "'Users' : len(server.members) }) # sort the servers by population", "for suppress if suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True)", "member in server.members: if not member.id in counted_users: counted_users.append(member.id) await", "joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })", "check_item = { \"ID\" : guild.id, \"Joined\" : guild.me.joined_at }", "== self.bot.user.id: count = len(self.bot.guilds) if count == 1: await", "msg = \"I couldn't find *{}* on this server...\".format(member) if", "position this server is joinedList = [] popList = []", "== 1: await ctx.send(\"I'm on *1* server. 
:blush:\") else: await", "population position check_item = { \"ID\" : guild.id, \"Population\" :", "users* (*{:,}* unique) on the *{:,} servers* I am currently", "far, I\\'ve witnessed *{:,} messages across all servers!*'.format(messages)) # Set", "**{:,}**.\".format(DisplayName.name(member), position, total) if position-1 == 1: # We have", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self, ctx, number", "for mem in ctx.message.guild.members: joinedList.append({ 'ID' : mem.id, 'Joined' :", "+= '{}. *{}* - *{}* - *({} members)*\\n'.format(i, member['Name'], time_str,", "see if we should allow it - always yes. #", "recentjoins(self, ctx, number : int = 10): \"\"\"Lists the most", "recent users to join - default is 10, max is", "guild.me.joined_at } total = len(joinedList) position = joinedList.index(check_item) + 1", "*{}* - *{}* - *(1 member)*\\n'.format(i, member['Name'], time_str) else: msg", "after as well after = \"**1** user\" elif total-position >", "time_str) else: msg += '{}. 
*{}* - *{}* - *({}", "\"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, members+bots, round(((membersOnline", "have previous members before = \"**1** user\" elif position-1 >", "}) popList.append({ 'ID' : g.id, 'Population' : len(g.members) }) #", "== None: # We didn't find it await ctx.send(\"I couldn't", "server in self.bot.guilds: serverCount += 1 userCount += len(server.members) for", "{ \"name\" : \"Users\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)", "counted_users.append(member.id) await message.edit(content='There are *{:,} users* (*{:,}* unique) on the", "ctx): \"\"\"Lists the number of messages I've seen on all", "after) elif len(before): # Just got before msg += \"\\n\\n{}", "not message.author.id == self.bot.user.id: server = message.guild messages = int(self.settings.getServerStat(server,", "msg = '' for member in joinedList: if i >", "def recentjoins(self, ctx, number : int = 10): \"\"\"Lists the", "sorted(serverList, key=lambda x:int(x['Users'])) if number > len(serverList): number = len(serverList)", "We didn't find it await ctx.send(\"I couldn't find that guild...\")", "value=emojitext, inline=True) emojitext=emojiMention else: emojitext = emojitext + emojiMention if", "like you wanted!') return i = 1 msg = '__**Servers", "# Don't count your own, Pooter if not message.author.id ==", "so far. 
(only applies after this module's inception, and if", "*{}* on this server...\".format(member) if suppress: msg = Nullify.clean(msg) await", "unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" :", "= discord.Embed(color=ctx.author.color) server_embed.title = guild.name # Get localized user time", "= \"**{:,}** users\".format(position-1) if total-position == 1: # There were", "if member.bot: bots += 1 if not member.id in counted_bots:", "= 0 counted_users = [] message = await ctx.send(\"Counting users...\")", "this module's inception, and if I'm online)\"\"\" messages = 0", "to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Members to", "return server_embed = discord.Embed(color=ctx.author.color) server_embed.title = guild.name # Get localized", "for server in self.bot.guilds: memberCount = 0 for member in", "the message and see if we should allow it -", "async def serverinfo(self, ctx, *, guild_name = None): \"\"\"Lists some", "if not member.id in counted_users: counted_users.append(member.id) await message.edit(content='There are *{:,}", "(only applies after this module's inception, and if I'm online)\"\"\"", "like you wanted!') return joinedList = [] for member in", "I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self, ctx): \"\"\"Lists", "1: await ctx.send(\"{} *1* server with me. :blush:\".format(targ)) else: await", "commands from Cogs import Nullify from Cogs import DisplayName from", "[] for server in self.bot.guilds: servers += 1 for member", "'{}. 
*{}* - *{}* - *({} members)*\\n'.format(i, member['Name'], time_str, member['Members'])", "you wanted!') return i = 1 msg = '__**Servers I\\'m", "not member.status == discord.Status.offline: membersOnline += 1 await Message.Embed( title=\"Member", "i += 1 if number < len(serverList): msg = '__**Top", "+= 1 if number < len(joinedList): msg = '__**Last {}", "server_embed.title = guild.name # Get localized user time local_time =", "if not member.id in counted_users: counted_users.append(member.id) if not member.status ==", "+= 1 for member in server.members: if member.bot: bots +=", "elif total-position > 1: after = \"**{:,}** users\".format(total-position) # Build", "memberCount }) # sort the servers by population serverList =", "1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position, total), inline=True) emojitext =", "\"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext + emojiMention if len(test) > 1024:", "else: members += 1 if not member.id in counted_users: counted_users.append(member.id)", "is str: member_check = DisplayName.memberForName(member, ctx.guild) if not member_check: msg", "= popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position, total),", "+= 1 if not member.id in counted_bots: counted_bots.append(member.id) if not", "online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\",", "@commands.command(pass_context=True) async def joinpos(self, ctx, *, member = None): \"\"\"Tells", "= \"\" after = \"\" msg = \"*{}'s* join position", "member_check joinedList = [] for mem in ctx.message.guild.members: joinedList.append({ 'ID'", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self, ctx, number", "\"\" msg = \"*{}'s* join position is 
**{:,}**.\".format(DisplayName.name(member), position, total)", ": \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline, bots,", "ctx.message.guild)), time_str) i += 1 if number < len(joinedList): msg", "me. :blush:\".format(targ, count)) @commands.command(pass_context=True) async def listservers(self, ctx, number :", "sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if number > len(serverList): number =", "x:int(x['Users']), reverse=True) if number > len(serverList): number = len(serverList) i", "well after = \"**1** user\" elif total-position > 1: after", "firstservers(self, ctx, number : int = 10): \"\"\"Lists the first", "'{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i += 1", "server in self.bot.guilds: servers += 1 for member in server.members:", "memberCount = 0 for member in server.members: memberCount += 1", "break if str(g.id) == str(guild_name): guild = g break if", ":blush:\".format(count)) return count = 0 for guild in self.bot.guilds: for", "round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\" : False} ], color=ctx.message.author).edit(ctx, message)", "for server in self.bot.guilds: if i > number: break msg", "== 1: # There were users after as well after", "\"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages ==", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentservers(self, ctx, number", "= '__**First {} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else:", "emojicount += 1 if emojicount == 1: ename = \"Emojis", "the users by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'])", "member['Members']) i += 1 if number < len(joinedList): msg =", "serverList.append({ 'Name' : server.name, 'Users' : memberCount }) # sort", "if ctx.author.id == member.id: targ = \"You 
share\" else: targ", "in self.bot.guilds: for mem in guild.members: if mem.id == member.id:", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self, ctx, number :", "self.settings = settings async def message(self, message): # Check the", "joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i = 1 msg", "it - always yes. # This module doesn't need to", "2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" : False}, { \"name\" :", "== 1: # We have previous members before = \"**1**", "__init__(self, bot, settings): self.bot = bot self.settings = settings async", "if number > 25: number = 25 if number <", "servers by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)", "{}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self, ctx, *, member", "10, max is 25.\"\"\" # Check if we're suppressing @here", "g.id, 'Population' : len(g.members) }) # sort the guilds by", ": g.id, 'Joined' : g.me.joined_at }) popList.append({ 'ID' : g.id,", "ename = \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else: emojitext", "position, total) if position-1 == 1: # We have previous", "i = 1 msg = '' for member in joinedList:", "round((online_members/(len(guild.members) - bot_member) * 100), 2) ) b_string = \"bot\"", "the number of messages I've seen on all severs so", "server.name, 'Users' : len(server.members) }) # sort the servers by", "member.id, 'Joined' : member.joined_at }) # sort the users by", "counted_users = [] message = await ctx.send(\"Counting users...\") for server", "else: suppress = False if number > 25: number =", "number = 50 if number < 1: await ctx.channel.send('Oookay -", "import Nullify from Cogs import DisplayName from Cogs import UserTime", "Just like you wanted!') return joinedList = [] for 
guild", "import Message def setup(bot): # Add the bot and deps", "Large\", value=guild.large, inline=True) # Find out where in our join", "in server.members: if not member.id in counted_users: counted_users.append(member.id) await message.edit(content='There", "Just like you wanted!') return joinedList = [] for member", "servers I've joined - default is 10, max is 25.\"\"\"", "bot_member = 0 bot_online = 0 for member in guild.members:", "= [] for server in self.bot.guilds: servers += 1 for", "= { \"ID\" : member.id, \"Joined\" : member.joined_at } total", "in self.bot.guilds: serverList.append({ 'Name' : server.name, 'Users' : len(server.members) })", "Cogs import DisplayName from Cogs import UserTime from Cogs import", "'{}. *{}* - *{:,}* members\\n'.format(i, server['Name'], server['Users']) i += 1", "value=guild.afk_channel, inline=True) server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered", "servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True)", "setup(bot): # Add the bot and deps settings = bot.get_cog(\"Settings\")", "local_time['zone']) msg += '{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)", "({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" : False},", "number > 50: number = 50 if number < 1:", "'{}. 
*{}* - *{}* - *(1 member)*\\n'.format(i, member['Name'], time_str) else:", "0 bot_online = 0 for member in guild.members: if member.bot:", "with the bot.\"\"\" # Check if we're suppressing @here and", "({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online", "10): \"\"\"Lists the top servers I'm connected to ordered by", "value=\"{:,} of {:,}\".format(position, total), inline=True) emojitext = \"\" emojicount =", "len(test) > 1024: # TOOO BIIIIIIIIG emojicount += 1 if", "await ctx.send(\"{} *{}* servers with me. :blush:\".format(targ, count)) @commands.command(pass_context=True) async", "is 50.\"\"\" # Check if we're suppressing @here and @everyone", "server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self, ctx,", "if we passed another guild guild = None if guild_name", "the current or passed server.\"\"\" # Check if we passed", "# Check if we're suppressing @here and @everyone mentions if", "top servers I'm connected to ordered by population - default", "with me. :blush:\".format(targ)) else: await ctx.send(\"{} *{}* servers with me.", ") #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True)", "server.members: if not member.id in counted_users: counted_users.append(member.id) await message.edit(content='There are", "> number: break msg += '{}. 
*{}*\\n'.format(i, server.name) i +=", "({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string,", "= \"**1** user\" elif position-1 > 1: before = \"**{:,}**", "server.name, 'Users' : memberCount }) # sort the servers by", "online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)),", "ctx, *, member = None): \"\"\"Tells when a user joined", "position = popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position,", "of {:,}\".format(position, total), inline=True) # Get our population position check_item", "'Delete' : False} @commands.command(pass_context=True) async def serverinfo(self, ctx, *, guild_name", "+= int(temp) messages -= 1 if messages == 1: await", "from Cogs import Nullify from Cogs import DisplayName from Cogs", "and len(after): # Got both msg += \"\\n\\n{} joined before,", "*{:,} message across all servers!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve", "check_item = { \"ID\" : guild.id, \"Population\" : len(guild.members) }", "\"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else: emojitext = emojitext", "and see if we should allow it - always yes.", "value=\"{:,} of {:,}\".format(position, total), inline=True) # Get our population position", "= 0 for emoji in guild.emojis: if emoji.animated: emojiMention =", "- *(1 member)*\\n'.format(i, member['Name'], time_str) else: msg += '{}. 
*{}*", "1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if messages == None: messages =", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self, ctx, number :", "= \"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename = \"Emojis (Continued)\" server_embed.add_field(name=ename,", "{} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {}", "botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' : guild.name, 'Joined' :", "ename = \"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename = \"Emojis (Continued)\"", "I've seen on all severs so far. (only applies after", "'Population' : len(g.members) }) # sort the guilds by join", "the servers by population serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True)", "len(before): # Just got before msg += \"\\n\\n{} joined before.\".format(before)", "am currently a part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async def", "member.id == self.bot.user.id: count = len(self.bot.guilds) if count == 1:", "Got both msg += \"\\n\\n{} joined before, and {} after.\".format(before,", "None if guild_name == None: guild = ctx.guild else: for", "counted_users = [] counted_bots = [] for server in self.bot.guilds:", "> number: break # Get localized user time local_time =", "total-position > 1: after = \"**{:,}** users\".format(total-position) # Build the", "+= len(server.members) for member in server.members: if not member.id in", "# Find out where in our join position this server", "= emojitext + emojiMention if len(emojitext): if emojicount == 0:", "On:**__\\n\\n' for server in self.bot.guilds: if i > number: break", "({:,g}%) - {:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100,", "= members = membersOnline = bots 
= botsOnline = 0", "False}, { \"name\" : \"Total\", \"value\" : \"└─ {:,}/{:,} online", "await Message.Embed( title=\"Member Stats\", description=\"Current User Information\".format(server.name), fields=[ { \"name\"", "bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" : False}, {", "I've joined - default is 10, max is 25.\"\"\" #", "= 0 messages += 1 self.settings.setServerStat(server, \"TotalMessages\", messages) return {", "member['Members'] == 1: msg += '{}. *{}* - *{}* -", "the servers by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'],", "messages = 0 messages += 1 self.settings.setServerStat(server, \"TotalMessages\", messages) return", "user\" elif position-1 > 1: before = \"**{:,}** users\".format(position-1) if", "# No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True)", "Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg", "else: emojitext = emojitext + emojiMention if len(emojitext): if emojicount", "= await ctx.send(\"Counting users...\") for server in self.bot.guilds: serverCount +=", "= 10): \"\"\"Lists the most recent users to join -", "and if I'm online)\"\"\" messages = 0 for guild in", "@commands.command(pass_context=True) async def firstjoins(self, ctx, number : int = 10):", "on *{}* servers. 
:blush:\".format(count)) return count = 0 for guild", "= { \"ID\" : guild.id, \"Population\" : len(guild.members) } total", "{ \"ID\" : guild.id, \"Population\" : len(guild.members) } total =", "async def listservers(self, ctx, number : int = 10): \"\"\"Lists", "0 if self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\") messages", "is **{:,}**.\".format(DisplayName.name(member), position, total) if position-1 == 1: # We", "g.name.lower() == guild_name.lower(): guild = g break if str(g.id) ==", "ctx.channel.send(msg) @commands.command(pass_context=True) async def firstservers(self, ctx, number : int =", "msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if", "\"\"\"Lists the number of messages I've seen on all severs", "= \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext + emojiMention if len(test) >", "suppressing @here and @everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress =", "users\".format(position-1) if total-position == 1: # There were users after", "is 10, max is 25.\"\"\" # Check if we're suppressing", "of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Top {} Servers:**__\\n\\n'.format(len(serverList))+msg", "'' for server in serverList: if i > number: break", "- look! No servers! 
Just like you wanted!') return serverList", "- {:,} unique ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)),", "messages == 1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message", "= None): \"\"\"Tells when a user joined compared to other", "= [] popList = [] for g in self.bot.guilds: joinedList.append({", "int(temp) messages -= 1 if messages == 1: await ctx.channel.send('So", "= 1 msg = '__**Servers I\\'m On:**__\\n\\n' for server in", "servers I'm connected to - default is 10, max is", "\"{} {}\".format(local_time['time'], local_time['zone']) msg += '{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'],", "msg += '{}. *{}* - *{}* - *({} members)*\\n'.format(i, member['Name'],", "if count == 1: await ctx.send(\"{} *1* server with me.", "continue if not member.status == discord.Status.offline: online_members += 1 #", "sorted(popList, key=lambda x:x['Population'], reverse=True) check_item = { \"ID\" : guild.id,", "suppress: msg = Nullify.clean(msg) await ctx.send(msg) return member = member_check", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self,", "< len(joinedList): msg = '__**First {} of {} Members to", "+ 1 before = \"\" after = \"\" msg =", "\"*{}* shares\".format(DisplayName.name(member)) if count == 1: await ctx.send(\"{} *1* server", "UserTime from Cogs import Message def setup(bot): # Add the", "Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check", "if messages == None: messages = 0 if messages ==", "member = member_check joinedList = [] for mem in ctx.message.guild.members:", "< len(joinedList): msg = '__**Last {} of {} Servers I", "bot_member) * 100), 2) ) b_string = \"bot\" if bot_member", "await ctx.send(\"I'm on *{}* servers. 
:blush:\".format(count)) return count = 0", "botsOnline = 0 counted_users = [] counted_bots = [] for", "== discord.Status.offline: membersOnline += 1 await Message.Embed( title=\"Member Stats\", description=\"Current", "async def firstjoins(self, ctx, number : int = 10): \"\"\"Lists", "module doesn't need to cancel messages. # Don't count your", "I've seen on this sever so far. (only applies after", "if messages == 1: await ctx.channel.send('So far, I\\'ve witnessed *{:,}", "+= 1 if number < len(joinedList): msg = '__**First {}", "suppress = False if number > 50: number = 50", "membersOnline += 1 await Message.Embed( title=\"Member Stats\", description=\"Current User Information\".format(server.name),", "msg = '__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for", "total)\".format(len(guild.emojis)) else: emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if", "i += 1 if number < len(serverList): msg = '__**Bottom", "count = 0 for guild in self.bot.guilds: for mem in", "2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" : False}, { \"name\" :", "number of users on all servers I'm connected to.\"\"\" message", "message): # Check the message and see if we should", "g in self.bot.guilds: joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at", "botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), \"inline\" : False} ],", "the number of messages I've seen on this sever so", "like you wanted!') return joinedList = [] for guild in", "1: await ctx.channel.send('Oookay - look! No users! 
Just like you", "len(serverList))+msg else: msg = '__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for", "joinedList = [] popList = [] for g in self.bot.guilds:", "on this server...\".format(member) if suppress: msg = Nullify.clean(msg) await ctx.send(msg)", "{} ({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True)", "bot.\"\"\" # Check if we're suppressing @here and @everyone mentions", "Just got before msg += \"\\n\\n{} joined before.\".format(before) elif len(after):", "inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True) server_embed.add_field(name=\"Owner\", value=guild.owner.name + \"#\" +", "for member in server.members: memberCount += 1 serverList.append({ 'Name' :", "if number < 1: await ctx.channel.send('Oookay - look! No users!", "self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True else: suppress = False if", "> number: break msg += '{}. *{}* - *{:,}* members\\n'.format(i,", "ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self, ctx, number : int =", "this module's inception, and if I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild,", "date joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i = 1", "\"Population\" : len(guild.members) } total = len(popList) position = popList.index(check_item)", ": int = 10): \"\"\"Lists the first servers I've joined", "len(joinedList))+msg else: msg = '__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg #", "inception, and if I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages", "look! No servers! 
Just like you wanted!') return i =", "import commands from Cogs import Nullify from Cogs import DisplayName", "\"value\" : \"└─ {:,}\".format(servers), \"inline\" : False }, { \"name\"", "return { 'Ignore' : False, 'Delete' : False} @commands.command(pass_context=True) async", "inline=True) emojitext = \"\" emojicount = 0 for emoji in", "key=lambda x:x['Joined']) popList = sorted(popList, key=lambda x:x['Population'], reverse=True) check_item =", "counted_users: counted_users.append(member.id) if not member.status == discord.Status.offline: membersOnline += 1", "< len(serverList): msg = '__**Top {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg", "ctx, number : int = 10): \"\"\"Lists the first users", "total), inline=True) # Get our population position check_item = {", "server in self.bot.guilds: memberCount = 0 for member in server.members:", "= len(self.bot.guilds) if count == 1: await ctx.send(\"I'm on *1*", "emojitext = \"\" emojicount = 0 for emoji in guild.emojis:", "\"\"\"Tells when a user joined compared to other users.\"\"\" #", "member_check if member.id == self.bot.user.id: count = len(self.bot.guilds) if count", "our message count locally -1 messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages", "servers by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) i", ": server.name, 'Users' : memberCount }) # sort the servers", "if emojicount == 1: ename = \"Emojis ({:,} total)\".format(len(guild.emojis)) else:", "messages. 
# Don't count your own, Pooter if not message.author.id", "1 for member in server.members: if member.bot: bots += 1", "False, 'Delete' : False} @commands.command(pass_context=True) async def serverinfo(self, ctx, *,", "\"Bots\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique", "= len(joinedList) position = joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join Position\", value=\"{:,}", "({} total)\".format(len(guild.emojis)) else: emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True)", "= '__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress", "= 0 for guild in self.bot.guilds: for mem in guild.members:", ": False}, { \"name\" : \"Total\", \"value\" : \"└─ {:,}/{:,}", "how many servers you share with the bot.\"\"\" # Check", "if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True else: suppress = False", "part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async def joinpos(self, ctx, *,", "g in self.bot.guilds: if g.name.lower() == guild_name.lower(): guild = g", "= Nullify.clean(msg) await ctx.send(msg) return member = member_check if member.id", "return joinedList = [] for member in ctx.message.guild.members: joinedList.append({ 'ID'", "member in server.members: if member.bot: bots += 1 if not", "default is 10, max is 25.\"\"\" # Check if we're", "to.\"\"\" message = await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers = members", "{} online ({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\",", "{} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {}", "if not member.status == discord.Status.offline: botsOnline += 1 else: members", "ctx.send(\"I'm on *1* server. 
:blush:\") else: await ctx.send(\"I'm on *{}*", "ctx, *, member = None): \"\"\"Lists how many servers you", "\"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline,", "= DisplayName.memberForName(member, ctx.guild) if not member_check: msg = \"I couldn't", "@commands.command(pass_context=True) async def bottomservers(self, ctx, number : int = 10):", "x:x['Joined']) check_item = { \"ID\" : member.id, \"Joined\" : member.joined_at", "in self.bot.guilds: serverCount += 1 userCount += len(server.members) for member", "else: await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async", "emojiMention if len(emojitext): if emojicount == 0: emojiname = \"Emojis", "operator import itemgetter from discord.ext import commands from Cogs import", "didn't find it await ctx.send(\"I couldn't find that guild...\") return", "'Name' : server.name, 'Users' : len(server.members) }) # sort the", "*(1 member)*\\n'.format(i, member['Name'], time_str) else: msg += '{}. 
*{}* -", "'__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if", "inline=True) chandesc = \"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc,", "joinedList = sorted(joinedList, key=lambda x:x['Joined']) popList = sorted(popList, key=lambda x:x['Population'],", "- default is 10, max is 50.\"\"\" # Check if", "server...\".format(member) if suppress: msg = Nullify.clean(msg) await ctx.send(msg) return member", "got before msg += \"\\n\\n{} joined before.\".format(before) elif len(after): #", "guild.created_at) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created at", "= [] for member in ctx.message.guild.members: joinedList.append({ 'ID' : member.id,", "*{}* - *(1 member)*\\n'.format(i, member['Name'], time_str) else: msg += '{}.", "if len(before) and len(after): # Got both msg += \"\\n\\n{}", "position-1 > 1: before = \"**{:,}** users\".format(position-1) if total-position ==", "if emojicount == 0: emojiname = \"Emojis ({} total)\".format(len(guild.emojis)) else:", "members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" : False}, {", ": botmember.joined_at, 'Members': len(guild.members) }) # sort the servers by", "\"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp) messages", "def allmessages(self, ctx): \"\"\"Lists the number of messages I've seen", "of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last", "ctx, number : int = 10): \"\"\"Lists the top servers", "ctx): \"\"\"Lists the number of messages I've seen on this", "on all severs so far. 
(only applies after this module's", "when a user joined compared to other users.\"\"\" # Check", "joined before, and {} after.\".format(before, after) elif len(before): # Just", "the *{:,} servers* I am currently a part of!'.format(userCount, len(counted_users),", "= joinedList.index(check_item) + 1 server_embed.add_field(name=\"Join Position\", value=\"{:,} of {:,}\".format(position, total),", "mem.joined_at }) # sort the users by join date joinedList", "connected to ordered by population - default is 10, max", "# Get localized user time local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])", "{ 'Ignore' : False, 'Delete' : False} @commands.command(pass_context=True) async def", "servers!*'.format(messages)) # Set our message count locally -1 messages =", "not member.status == discord.Status.offline: bot_online += 1 continue if not", "by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) i =", "emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention = \"<:\"+emoji.name+\":\"+str(emoji.id)+\">\" test = emojitext", "\"TotalMessages\", messages) if messages == None: messages = 0 if", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self,", "0 counted_users = [] message = await ctx.send(\"Counting users...\") for", "bots += 1 if not member.id in counted_bots: counted_bots.append(member.id) if", "No servers! 
Just like you wanted!') return serverList = []", "number = 25 if number < 1: await ctx.channel.send('Oookay -", "and {} after.\".format(before, after) elif len(before): # Just got before", "len(joinedList): msg = '__**Last {} of {} Members to Join:**__\\n\\n'.format(number,", "len(joinedList))+msg else: msg = '__**First {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg #", "Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg", "your own, Pooter if not message.author.id == self.bot.user.id: server =", "msg = '__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for", "guild.members: if member.bot: bot_member += 1 if not member.status ==", "- {:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)),", "the first servers I've joined - default is 10, max", "message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,} servers*", "\"\"\"Lists the number of messages I've seen on this sever", "return serverList = [] for server in self.bot.guilds: serverList.append({ 'Name'", "\"\"\"Lists the most recent users to join - default is", "Just like you wanted!') return i = 1 msg =", "self.settings.setServerStat(server, \"TotalMessages\", messages) return { 'Ignore' : False, 'Delete' :", "- *{}* - *(1 member)*\\n'.format(i, member['Name'], time_str) else: msg +=", "+ botsOnline)/(members+bots))*100, 2)), \"inline\" : False} ], color=ctx.message.author).edit(ctx, message) '''userCount", "= bots = botsOnline = 0 counted_users = [] counted_bots", "about the current or passed server.\"\"\" # Check if we", "message.guild messages = int(self.settings.getServerStat(server, \"TotalMessages\")) if messages == None: messages", "all severs so far. 
(only applies after this module's inception,", "UserTime.getUserTime(ctx.author, self.settings, guild.created_at) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description =", "ctx.guild else: for g in self.bot.guilds: if g.name.lower() == guild_name.lower():", "guild in self.bot.guilds: botmember = DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' :", "first servers I've joined - default is 10, max is", "def users(self, ctx): \"\"\"Lists the total number of users on", "= \"Created at {}\".format(time_str) online_members = 0 bot_member = 0", "for emoji in guild.emojis: if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else:", "of messages I've seen on all severs so far. (only", "({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True) chandesc = \"{:,}", "member.id, \"Joined\" : member.joined_at } total = len(joinedList) position =", "{ \"name\" : \"Bots\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)", "len(joinedList))+msg else: msg = '__**Last {} Members to Join:**__\\n\\n'.format(len(joinedList))+msg #", "seen on all severs so far. 
(only applies after this", "@commands.command(pass_context=True) async def sharedservers(self, ctx, *, member = None): \"\"\"Lists", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self, ctx): \"\"\"Lists the", "in self.bot.guilds: temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\") is None", "int = 10): \"\"\"Lists the most recent users to join", "'Joined' : mem.joined_at }) # sort the users by join", "user_string += \"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100,", "= member_check if member.id == self.bot.user.id: count = len(self.bot.guilds) if", "> 25: number = 25 if number < 1: await", "key=lambda x:x['Joined'], reverse=True) i = 1 msg = '' for", "# Check if we passed another guild guild = None", "= int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages) if", "= \"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname = \"Emojis (Continued)\" server_embed.add_field(name=emojiname,", "time_str) i += 1 if number < len(joinedList): msg =", "= [] message = await ctx.send(\"Counting users...\") for server in", "bot_member == 1 else \"bots\" user_string += \"\\n{:,}/{:,} {} online", "if count == 1: await ctx.send(\"I'm on *1* server. :blush:\")", "ctx, number : int = 10): \"\"\"Lists the first servers", "if number < len(serverList): msg = '__**Top {} of {}", "= sorted(joinedList, key=lambda x:x['Joined']) check_item = { \"ID\" : member.id,", "member)*\\n'.format(i, member['Name'], time_str) else: msg += '{}. *{}* - *{}*", "*{}* - *{:,}* members\\n'.format(i, server['Name'], server['Users']) i += 1 if", "this sever so far. (only applies after this module's inception,", "# Build the string! 
if len(before) and len(after): # Got", "int(self.settings.getServerStat(server, \"TotalMessages\")) if messages == None: messages = 0 messages", "inline=True) # Find out where in our join position this", "\"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members) - bot_member,", "before = \"**{:,}** users\".format(position-1) if total-position == 1: # There", "= None): \"\"\"Lists how many servers you share with the", "mem.id, 'Joined' : mem.joined_at }) # sort the users by", "= 0 for member in guild.members: if member.bot: bot_member +=", "= Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self, ctx, number", "users(self, ctx): \"\"\"Lists the total number of users on all", "counted_bots = [] for server in self.bot.guilds: servers += 1", "in our join position this server is joinedList = []", "import discord from datetime import datetime from operator import itemgetter", "= '__**Last {} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else:", "if not member.id in counted_bots: counted_bots.append(member.id) if not member.status ==", "def recentservers(self, ctx, number : int = 10): \"\"\"Lists the", "before msg += \"\\n\\n{} joined before.\".format(before) elif len(after): # Just", "'' for member in joinedList: if i > number: break", "key=lambda x:x['Joined']) i = 1 msg = '' for member", "msg += '{}. *{}* - *{}*\\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str) i", "= g break if guild == None: # We didn't", "'{}. *{}*\\n'.format(i, server.name) i += 1 # Check for suppress", "not member_check: msg = \"I couldn't find *{}* on this", "'Joined' : botmember.joined_at, 'Members': len(guild.members) }) # sort the servers", "1: msg += '{}. *{}* - *{}* - *(1 member)*\\n'.format(i,", "severs so far. 
(only applies after this module's inception, and", "= settings async def message(self, message): # Check the message", "{} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**First {}", "== 1: await ctx.send(\"{} *1* server with me. :blush:\".format(targ)) else:", "= '__**Last {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else:", "There were users after as well after = \"**1** user\"", "connected to.\"\"\" message = await Message.EmbedText(title=\"Counting users...\", color=ctx.message.author).send(ctx) servers =", "in counted_bots: counted_bots.append(member.id) if not member.status == discord.Status.offline: botsOnline +=", "1: await ctx.send(\"I'm on *1* server. :blush:\") else: await ctx.send(\"I'm", "\"TotalMessages\") messages += int(temp) messages -= 1 if messages ==", "count your own, Pooter if not message.author.id == self.bot.user.id: server", "in serverList: if i > number: break msg += '{}.", "+= 1 await Message.Embed( title=\"Member Stats\", description=\"Current User Information\".format(server.name), fields=[", "# We have previous members before = \"**1** user\" elif", "discord.Status.offline: botsOnline += 1 else: members += 1 if not", "messages) return { 'Ignore' : False, 'Delete' : False} @commands.command(pass_context=True)", "= 0 if messages == 1: await ctx.channel.send('So far, I\\'ve", "number < len(joinedList): msg = '__**Last {} of {} Servers", "ctx.send(msg) return member = member_check joinedList = [] for mem", "guild_name == None: guild = ctx.guild else: for g in", "server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon", "= 1 msg = '' for server in serverList: if", "*{:,} servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))'''", "1024: # TOOO BIIIIIIIIG emojicount += 1 if emojicount ==", "1 before = \"\" after = \"\" msg = \"*{}'s*", 
"Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def messages(self, ctx): \"\"\"Lists the", "\"TotalMessages\")) if messages == None: messages = 0 messages +=", "+= 1 serverList.append({ 'Name' : server.name, 'Users' : memberCount })", "serverList = [] for server in self.bot.guilds: memberCount = 0", "inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) # Find out where in", "\"inline\" : False} ], color=ctx.message.author).edit(ctx, message) '''userCount = 0 serverCount", "= None if guild_name == None: guild = ctx.guild else:", "= bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats: def __init__(self, bot, settings):", "= sorted(popList, key=lambda x:x['Population'], reverse=True) check_item = { \"ID\" :", "self.bot.guilds: servers += 1 for member in server.members: if member.bot:", "1 msg = '' for member in joinedList: if i", "by join date joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True) i", "def topservers(self, ctx, number : int = 10): \"\"\"Lists the", "= ctx.guild else: for g in self.bot.guilds: if g.name.lower() ==", "if self.settings.getServerStat(guild, \"TotalMessages\") is None else self.settings.getServerStat(guild, \"TotalMessages\") messages +=", "} total = len(joinedList) position = joinedList.index(check_item) + 1 before", "join position is **{:,}**.\".format(DisplayName.name(member), position, total) if position-1 == 1:", "server_embed.set_thumbnail(url=ctx.author.default_avatar_url) server_embed.set_footer(text=\"Server ID: {}\".format(guild.id)) await ctx.channel.send(embed=server_embed) @commands.command(pass_context=True) async def sharedservers(self,", "member.id: count += 1 if ctx.author.id == member.id: targ =", "*{:,}* members\\n'.format(i, server['Name'], server['Users']) i += 1 if number <", "many servers you share with the bot.\"\"\" # Check if", "messages == None: messages = 0 messages += 1 
self.settings.setServerStat(server,", "I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Servers I", "> 1024: # TOOO BIIIIIIIIG emojicount += 1 if emojicount", "# TOOO BIIIIIIIIG emojicount += 1 if emojicount == 1:", "2)), \"inline\" : False} ], color=ctx.message.author).edit(ctx, message) '''userCount = 0", "in guild.members: if mem.id == member.id: count += 1 if", "the servers I'm connected to - default is 10, max", "Information\".format(server.name), fields=[ { \"name\" : \"Servers\", \"value\" : \"└─ {:,}\".format(servers),", "sorted(joinedList, key=lambda x:x['Joined']) check_item = { \"ID\" : member.id, \"Joined\"", "# Get our population position check_item = { \"ID\" :", "a part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async def joinpos(self, ctx,", "'Ignore' : False, 'Delete' : False} @commands.command(pass_context=True) async def serverinfo(self,", "break msg += '{}. *{}* - *{:,}* members\\n'.format(i, server['Name'], server['Users'])", "*{:,} users* (*{:,}* unique) on the *{:,} servers* I am", "servers! Just like you wanted!') return serverList = [] for", "wanted!') return joinedList = [] for member in ctx.message.guild.members: joinedList.append({", "\"\\n{:,}/{:,} {} online ({:,g}%)\".format( bot_online, bot_member, b_string, round((bot_online/bot_member)*100, 2) )", "join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) check_item = {", "message across all servers!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed", "number < len(joinedList): msg = '__**Last {} of {} Members", "mem.id == member.id: count += 1 if ctx.author.id == member.id:", "msg = '__**Bottom {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else: msg", "50.\"\"\" # Check if we're suppressing @here and @everyone mentions", "= \"{} {}\".format(local_time['time'], local_time['zone']) msg += '{}. 
*{}* - *{}*\\n'.format(i,", "I\\'ve witnessed *{:,} messages across all servers!*'.format(messages)) # Set our", "\"bot\" if bot_member == 1 else \"bots\" user_string += \"\\n{:,}/{:,}", "== 1: await ctx.channel.send('So far, I\\'ve witnessed *{:,} message!*'.format(messages)) else:", "suppress = False if number > 25: number = 25", "server_embed.add_field(name=\"Verification\", value=guild.verification_level, inline=True) server_embed.add_field(name=\"Voice Region\", value=guild.region, inline=True) server_embed.add_field(name=\"Considered Large\", value=guild.large,", "+ emojiMention if len(test) > 1024: # TOOO BIIIIIIIIG emojicount", "bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100) user_string = \"{:,}/{:,} online ({:,g}%)\".format( online_members, len(guild.members)", "users by join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) i", "bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\", value=str(len(guild.roles)), inline=True)", "first users to join - default is 10, max is", "- look! No users! Just like you wanted!') return joinedList", "({}%)\".format(online_members, len(guild.members), bot_percent), inline=True) server_embed.add_field(name=\"Members ({:,} total)\".format(len(guild.members)), value=user_string, inline=True) server_embed.add_field(name=\"Roles\",", "members)*\\n'.format(i, member['Name'], time_str, member['Members']) i += 1 if number <", "number: break msg += '{}. 
*{}* - *{:,}* members\\n'.format(i, server['Name'],", "self.bot.guilds: serverList.append({ 'Name' : server.name, 'Users' : len(server.members) }) #", "type(member) is str: member_check = DisplayName.memberForName(member, ctx.guild) if not member_check:", "messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 self.settings.setServerStat(ctx.message.guild, \"TotalMessages\", messages)", "at {}\".format(time_str) online_members = 0 bot_member = 0 bot_online =", "Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Servers", "*{:,} message!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed *{:,} messages!*'.format(messages))", "# Set our message count locally -1 messages = int(self.settings.getServerStat(ctx.message.guild,", "count == 1: await ctx.send(\"I'm on *1* server. :blush:\") else:", "def sharedservers(self, ctx, *, member = None): \"\"\"Lists how many", "not member.id in counted_users: counted_users.append(member.id) if not member.status == discord.Status.offline:", "(Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No", "value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: # No Icon server_embed.set_thumbnail(url=ctx.author.default_avatar_url)", "if member == None: member = ctx.author if type(member) is", "], color=ctx.message.author).edit(ctx, message) '''userCount = 0 serverCount = 0 counted_users", "self.settings, guild.created_at) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created", ": \"Users\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%) - {:,}", "= member_check joinedList = [] for mem in ctx.message.guild.members: joinedList.append({", "len(joinedList) position = joinedList.index(check_item) + 1 before = \"\" after", "joinedList = 
sorted(joinedList, key=lambda x:x['Joined']) i = 1 msg =", "await ctx.send(\"{} *1* server with me. :blush:\".format(targ)) else: await ctx.send(\"{}", "bot_member, round((online_members/(len(guild.members) - bot_member) * 100), 2) ) b_string =", "for server in self.bot.guilds: servers += 1 for member in", "10): \"\"\"Lists the first servers I've joined - default is", "= [] for server in self.bot.guilds: serverList.append({ 'Name' : server.name,", "msg = '' for server in serverList: if i >", "count += 1 if ctx.author.id == member.id: targ = \"You", "'Joined' : g.me.joined_at }) popList.append({ 'ID' : g.id, 'Population' :", "the total number of users on all servers I'm connected", ": len(g.members) }) # sort the guilds by join date", "@here and @everyone mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True", "== member.id: targ = \"You share\" else: targ = \"*{}*", "unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" :", "find *{}* on this server...\".format(member) if suppress: msg = Nullify.clean(msg)", ": \"└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline, members,", "import itemgetter from discord.ext import commands from Cogs import Nullify", "1 else: members += 1 if not member.id in counted_users:", "should allow it - always yes. 
# This module doesn't", "= DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at,", "I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg =", "def serverinfo(self, ctx, *, guild_name = None): \"\"\"Lists some info", "i += 1 if number < len(joinedList): msg = '__**First", "emojicount = 0 for emoji in guild.emojis: if emoji.animated: emojiMention", "= \"\" emojicount = 0 for emoji in guild.emojis: if", "= \"{} {}\".format(local_time['time'], local_time['zone']) if member['Members'] == 1: msg +=", "unique) on the *{:,} servers* I am currently a part", "len(serverList): msg = '__**Top {} of {} Servers:**__\\n\\n'.format(number, len(serverList))+msg else:", "# Got both msg += \"\\n\\n{} joined before, and {}", "= '__**First {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress", "I'm online)\"\"\" messages = 0 for guild in self.bot.guilds: temp", "'__**Bottom {} Servers:**__\\n\\n'.format(len(serverList))+msg # Check for suppress if suppress: msg", "int = 10): \"\"\"Lists the top servers I'm connected to", "\"name\" : \"Total\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline +", "witnessed *{:,} message across all servers!*'.format(messages)) else: await ctx.channel.send('So far,", "False if number > 25: number = 25 if number", "were users after as well after = \"**1** user\" elif", ": server.name, 'Users' : len(server.members) }) # sort the servers", "guild...\") return server_embed = discord.Embed(color=ctx.author.color) server_embed.title = guild.name # Get", "self.bot.guilds: if g.name.lower() == guild_name.lower(): guild = g break if", "for g in self.bot.guilds: joinedList.append({ 'ID' : g.id, 'Joined' :", "before.\".format(before) elif len(after): # Just after msg += \"\\n\\n{} joined", "round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), \"inline\" : False}, { 
\"name\"", "emoji in guild.emojis: if emoji.animated: emojiMention = \"<a:\"+emoji.name+\":\"+str(emoji.id)+\">\" else: emojiMention", "ctx.channel.send('Oookay - look! No servers! Just like you wanted!') return", "population serverList = sorted(serverList, key=lambda x:int(x['Users'])) if number > len(serverList):", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self, ctx,", "{} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg =", "for guild in self.bot.guilds: temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\")", ": \"Total\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline,", "Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def topservers(self, ctx, number :", "with me. :blush:\".format(targ, count)) @commands.command(pass_context=True) async def listservers(self, ctx, number", ": g.id, 'Population' : len(g.members) }) # sort the guilds", "async def firstservers(self, ctx, number : int = 10): \"\"\"Lists", "if member.id == self.bot.user.id: count = len(self.bot.guilds) if count ==", "me. :blush:\".format(targ)) else: await ctx.send(\"{} *{}* servers with me. 
:blush:\".format(targ,", "member.status == discord.Status.offline: online_members += 1 # bot_percent = \"{:,g}%\".format((bot_member/len(guild.members))*100)", ": guild.id, \"Joined\" : guild.me.joined_at } total = len(joinedList) position", "msg = '__**First {} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg", "Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Servers I Joined:**__\\n\\n'.format(len(joinedList))+msg", "Cogs import Message def setup(bot): # Add the bot and", "b_string, round((bot_online/bot_member)*100, 2) ) #server_embed.add_field(name=\"Members\", value=\"{:,}/{:,} online ({:.2f}%)\\n{:,} {} ({}%)\".format(online_members,", "Join:**__\\n\\n'.format(len(joinedList))+msg # Check for suppress if suppress: msg = Nullify.clean(msg)", "Cogs import UserTime from Cogs import Message def setup(bot): #", "*, guild_name = None): \"\"\"Lists some info about the current", "guild = g break if guild == None: # We", "suppress = True else: suppress = False if member ==", "botsOnline)/(members+bots))*100, 2)), \"inline\" : False} ], color=ctx.message.author).edit(ctx, message) '''userCount =", "elif position-1 > 1: before = \"**{:,}** users\".format(position-1) if total-position", "*{}*\\n'.format(i, server.name) i += 1 # Check for suppress if", "[] for member in ctx.message.guild.members: joinedList.append({ 'ID' : member.id, 'Joined'", "= 10): \"\"\"Lists the servers I'm connected to - default", "1 if not member.id in counted_users: counted_users.append(member.id) if not member.status", "\"{:,} text, {:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\",", "None: messages = 0 messages += 1 self.settings.setServerStat(server, \"TotalMessages\", messages)", "await ctx.channel.send(msg) @commands.command(pass_context=True) async def recentjoins(self, ctx, number : int", "'__**Last 
{} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg", "= membersOnline = bots = botsOnline = 0 counted_users =", "False} @commands.command(pass_context=True) async def serverinfo(self, ctx, *, guild_name = None):", "(Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True) emojitext=emojiMention else: emojitext = emojitext +", "default is 10, max is 50.\"\"\" # Check if we're", "+= 1 self.settings.setServerStat(server, \"TotalMessages\", messages) return { 'Ignore' : False,", "msg = '__**Last {} of {} Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg", "time_str = \"{} {}\".format(local_time['time'], local_time['zone']) if member['Members'] == 1: msg", "1 continue if not member.status == discord.Status.offline: online_members += 1", "- bot_member, round((online_members/(len(guild.members) - bot_member) * 100), 2) ) b_string", "local_time['zone']) server_embed.description = \"Created at {}\".format(time_str) online_members = 0 bot_member", "import DisplayName from Cogs import UserTime from Cogs import Message", "1 self.settings.setServerStat(server, \"TotalMessages\", messages) return { 'Ignore' : False, 'Delete'", "+= 1 if ctx.author.id == member.id: targ = \"You share\"", "server.name) i += 1 # Check for suppress if suppress:", "servers! 
Just like you wanted!') return joinedList = [] for", "ctx, *, guild_name = None): \"\"\"Lists some info about the", "= 0 serverCount = 0 counted_users = [] message =", "find it await ctx.send(\"I couldn't find that guild...\") return server_embed", "emojiMention if len(test) > 1024: # TOOO BIIIIIIIIG emojicount +=", ": \"Servers\", \"value\" : \"└─ {:,}\".format(servers), \"inline\" : False },", "member['Joined']) time_str = \"{} {}\".format(local_time['time'], local_time['zone']) if member['Members'] == 1:", "member = ctx.author if type(member) is str: member_check = DisplayName.memberForName(member,", "(*{:,}* unique) on the *{:,} servers* I am currently a", "# sort the servers by population serverList = sorted(serverList, key=lambda", "bot_member += 1 if not member.status == discord.Status.offline: bot_online +=", "Stats\", description=\"Current User Information\".format(server.name), fields=[ { \"name\" : \"Servers\", \"value\"", "find that guild...\") return server_embed = discord.Embed(color=ctx.author.color) server_embed.title = guild.name", "datetime from operator import itemgetter from discord.ext import commands from", "< 1: await ctx.channel.send('Oookay - look! No users! Just like", "1: ename = \"Emojis ({:,} total)\".format(len(guild.emojis)) else: ename = \"Emojis", "len(g.members) }) # sort the guilds by join date joinedList", "across all servers!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed *{:,}", "*{}* servers with me. 
:blush:\".format(targ, count)) @commands.command(pass_context=True) async def listservers(self,", "self.settings.getServerStat(guild, \"TotalMessages\") messages += int(temp) messages -= 1 if messages", "shares\".format(DisplayName.name(member)) if count == 1: await ctx.send(\"{} *1* server with", "g break if guild == None: # We didn't find", "member = member_check if member.id == self.bot.user.id: count = len(self.bot.guilds)", "x:x['Joined']) i = 1 msg = '' for member in", "suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def bottomservers(self,", "{:,}\".format(position, total), inline=True) # Get our population position check_item =", "= \"\" msg = \"*{}'s* join position is **{:,}**.\".format(DisplayName.name(member), position,", "{:,}\".format(position, total), inline=True) emojitext = \"\" emojicount = 0 for", "this server is joinedList = [] popList = [] for", "= '__**Servers I\\'m On:**__\\n\\n' for server in self.bot.guilds: if i", "i = 1 msg = '__**Servers I\\'m On:**__\\n\\n' for server", "if not member.status == discord.Status.offline: online_members += 1 # bot_percent", "server_embed.add_field(name=\"Considered Large\", value=guild.large, inline=True) # Find out where in our", "def listservers(self, ctx, number : int = 10): \"\"\"Lists the", "popList.index(check_item) + 1 server_embed.add_field(name=\"Population Rank\", value=\"{:,} of {:,}\".format(position, total), inline=True)", "round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), \"inline\" : False}, { \"name\"", "async def allmessages(self, ctx): \"\"\"Lists the number of messages I've", "this server...\".format(member) if suppress: msg = Nullify.clean(msg) await ctx.send(msg) return", "serverCount = 0 counted_users = [] message = await ctx.send(\"Counting", "'Users' : memberCount }) # sort the servers by population", "number of messages I've seen on this sever so far.", "Find out where in our join 
position this server is", "async def bottomservers(self, ctx, number : int = 10): \"\"\"Lists", "mentions if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"): suppress = True else: suppress =", "sorted(joinedList, key=lambda x:x['Joined']) popList = sorted(popList, key=lambda x:x['Population'], reverse=True) check_item", "for member in server.members: if member.bot: bots += 1 if", "{:,} voice\".format(len(guild.text_channels), len(guild.voice_channels)) server_embed.add_field(name=\"Channels\", value=chandesc, inline=True) server_embed.add_field(name=\"Default Role\", value=guild.default_role, inline=True)", "if type(member) is str: member_check = DisplayName.memberForName(member, ctx.guild) if not", "servers by population serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True) if", "25: number = 25 if number < 1: await ctx.channel.send('Oookay", "self.bot.guilds: temp = 0 if self.settings.getServerStat(guild, \"TotalMessages\") is None else", "\"\"\"Lists some info about the current or passed server.\"\"\" #", "if suppress: msg = Nullify.clean(msg) await ctx.send(msg) return member =", "\"Emojis (Continued)\" server_embed.add_field(name=emojiname, value=emojitext, inline=True) if len(guild.icon_url): server_embed.set_thumbnail(url=guild.icon_url) else: #", "= [] for server in self.bot.guilds: memberCount = 0 for", "in guild.members: if member.bot: bot_member += 1 if not member.status", "await ctx.send(msg) return member = member_check joinedList = [] for", "Members to Join:**__\\n\\n'.format(number, len(joinedList))+msg else: msg = '__**Last {} Members", "int = 10): \"\"\"Lists the servers I'm connected to -", "test = emojitext + emojiMention if len(test) > 1024: #", "servers! 
Just like you wanted!') return i = 1 msg", "= False if number > 25: number = 25 if", "\"{} {}\".format(local_time['time'], local_time['zone']) server_embed.description = \"Created at {}\".format(time_str) online_members =", "100), 2) ) b_string = \"bot\" if bot_member == 1", "[] message = await ctx.send(\"Counting users...\") for server in self.bot.guilds:", "msg = Nullify.clean(msg) await ctx.send(msg) return member = member_check joinedList", "our population position check_item = { \"ID\" : guild.id, \"Population\"", "population - default is 10, max is 50.\"\"\" # Check", "'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) }) #", "are *{:,} users* (*{:,}* unique) on the *{:,} servers* I", ": int = 10): \"\"\"Lists the servers I'm connected to", "2)), \"inline\" : False}, { \"name\" : \"Total\", \"value\" :", "user joined compared to other users.\"\"\" # Check if we're", "= '' for server in serverList: if i > number:", "{:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2),", "self.bot.guilds: serverCount += 1 userCount += len(server.members) for member in", "1 if number < len(serverList): msg = '__**Bottom {} of", "= [] counted_bots = [] for server in self.bot.guilds: servers", "settings = bot.get_cog(\"Settings\") bot.add_cog(ServerStats(bot, settings)) class ServerStats: def __init__(self, bot,", "def firstservers(self, ctx, number : int = 10): \"\"\"Lists the", "member = None): \"\"\"Tells when a user joined compared to", "= Nullify.clean(msg) await ctx.send(msg) return member = member_check joinedList =", "I am currently a part of!'.format(userCount, len(counted_users), serverCount))''' @commands.command(pass_context=True) async", "join date joinedList = sorted(joinedList, key=lambda x:x['Joined']) popList = sorted(popList,", "I'm online)\"\"\" messages = int(self.settings.getServerStat(ctx.message.guild, \"TotalMessages\")) messages -= 1 
self.settings.setServerStat(ctx.message.guild,", "in ctx.message.guild.members: joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })", "== None: messages = 0 messages += 1 self.settings.setServerStat(server, \"TotalMessages\",", "{ \"ID\" : guild.id, \"Joined\" : guild.me.joined_at } total =", "\"SuppressMentions\"): suppress = True else: suppress = False if number", "len(after): # Just after msg += \"\\n\\n{} joined after.\".format(after) await", "1 await Message.Embed( title=\"Member Stats\", description=\"Current User Information\".format(server.name), fields=[ {", "bot, settings): self.bot = bot self.settings = settings async def", "* 100), 2) ) b_string = \"bot\" if bot_member ==", "1 if number < len(serverList): msg = '__**Top {} of", "msg += '{}. *{}*\\n'.format(i, server.name) i += 1 # Check", "on *1* server. :blush:\") else: await ctx.send(\"I'm on *{}* servers.", "topservers(self, ctx, number : int = 10): \"\"\"Lists the top", "from operator import itemgetter from discord.ext import commands from Cogs", "def joinpos(self, ctx, *, member = None): \"\"\"Tells when a", ": len(guild.members) } total = len(popList) position = popList.index(check_item) +", "guild == None: # We didn't find it await ctx.send(\"I", "joinedList = [] for mem in ctx.message.guild.members: joinedList.append({ 'ID' :", "yes. # This module doesn't need to cancel messages. 
#", "value=guild.owner.name + \"#\" + guild.owner.discriminator, inline=True) server_embed.add_field(name=\"AFK Channel\", value=guild.afk_channel, inline=True)", "botsOnline += 1 else: members += 1 if not member.id", ": int = 10): \"\"\"Lists the first users to join", "< len(joinedList): msg = '__**Last {} of {} Members to", "if suppress: msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def", "as well after = \"**1** user\" elif total-position > 1:", "ctx.channel.send('So far, I\\'ve witnessed *{:,} messages!*'.format(messages)) @commands.command(pass_context=True) async def allmessages(self,", "serverList = sorted(serverList, key=lambda x:int(x['Users'])) if number > len(serverList): number", "({:,} total)\".format(len(guild.emojis)) else: ename = \"Emojis (Continued)\" server_embed.add_field(name=ename, value=emojitext, inline=True)", "'__**Last {} of {} Servers I Joined:**__\\n\\n'.format(number, len(joinedList))+msg else: msg", "@commands.command(pass_context=True) async def serverinfo(self, ctx, *, guild_name = None): \"\"\"Lists", "member.status == discord.Status.offline: bot_online += 1 continue if not member.status", "msg = Nullify.clean(msg) await ctx.channel.send(msg) @commands.command(pass_context=True) async def users(self, ctx):", "True else: suppress = False if member == None: member", "after.\".format(before, after) elif len(before): # Just got before msg +=", "len(guild.members) - bot_member, round((online_members/(len(guild.members) - bot_member) * 100), 2) )", "server.members: memberCount += 1 serverList.append({ 'Name' : server.name, 'Users' :", "ctx.send(\"{} *{}* servers with me. 
:blush:\".format(targ, count)) @commands.command(pass_context=True) async def", "}, { \"name\" : \"Users\", \"value\" : \"└─ {:,}/{:,} online", "emojiname = \"Emojis ({} total)\".format(len(guild.emojis)) else: emojiname = \"Emojis (Continued)\"", "0 for guild in self.bot.guilds: for mem in guild.members: if", "sort the servers by join date joinedList = sorted(joinedList, key=lambda", "witnessed *{:,} message!*'.format(messages)) else: await ctx.channel.send('So far, I\\'ve witnessed *{:,}", "1 if number < len(joinedList): msg = '__**First {} of", "DisplayName.memberForID(self.bot.user.id, guild) joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members':", "i > number: break msg += '{}. *{}* - *{:,}*" ]
[ "df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test", "test: 20k (10k 0, 10k 1) \"\"\" df_ones = df[df['label']", "df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000]", "<filename>chess_commentary_model/transformers_model/dataset_preprocessing.py \"\"\"Métodos de preprocessamento de testes individuais \"\"\" import pandas", "df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_5(df, seed=0): \"\"\"training:", "df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones =", "0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training", "random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training =", "as pd import numpy as np import math def test_1(df,", "40k 1) test: 20k (10k 0, 10k 1) \"\"\" df_ones", "10k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0]", "balanced; test: unbalanced training: 58k (29000 0, 29000 1) test:", "= df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test", "labels_train, labels_test def test_3(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training:", "(4k 0, 16k 1) \"\"\" df_ones = df[df['label'] == 1]", "df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training", "29k 1) test: 14.5k (7.25k 0, 7.25k 
1) \"\"\" df_ones", "df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True)", "= df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test =", "58k (29k 0, 29k 1) test: 14.5k (7.25k 0, 7.25k", "= df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "training: 80k (16k 1, 64k 0) test: 20k (4k 1,", "################################## ## Tests on old dataset ################################## def test_4(df, seed=0):", "= df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training =", "\"\"\"training: balanced; test: balanced training: 80k (40k 0, 40k 1)", "df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250]", "labels_test def test_3(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 80k", "= df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training =", "labels_train, labels_test ################################## ## Tests on old dataset ################################## def", "= df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test =", "balanced; test: balanced training: 80k (40k 0, 40k 1) test:", "seed=0): \"\"\"training: balanced; test: balanced training: 80k (40k 0, 40k", "20k (4k 1, 16k 0) \"\"\" df_ones = df[df['label'] ==", "unbalanced training: 58k (6380 1, 51620 0) test: 14.5k (1595", "= df_test['label'].tolist() return sentences_train, 
sentences_test, labels_train, labels_test def test_3(df, seed=0):", "df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905]", "14.5k (12905 0, 1595 1) \"\"\" df_ones = df[df['label'] ==", "df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "training: 58k (29000 0, 29000 1) test: 14.5k (12905 0,", "labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train,", "df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros", "def test_6(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 58k (6380", "preprocessamento de testes individuais \"\"\" import pandas as pd import", "df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "= pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test])", "df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525]", "return sentences_train, sentences_test, labels_train, labels_test def test_2(df, seed=0): \"\"\"training: balanced;", "pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test =", "balanced training: 58k (29k 0, 29k 1) test: 14.5k (7.25k", "= df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = 
df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000]", "1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)", "test: unbalanced training: 80k (16k 1, 64k 0) test: 20k", "df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000]", "labels_train, labels_test def test_2(df, seed=0): \"\"\"training: balanced; test: unbalanced training:", "df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training", "np import math def test_1(df, seed=0): \"\"\"training: balanced; test: balanced", "= df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test", "df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test", "test: 14.5k (12905 0, 1595 1) \"\"\" df_ones = df[df['label']", "df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training])", "unbalanced training: 80k (40k 0, 40k 1) test: 20k (4k", "training: balanced; test: balanced training: 58k (29k 0, 29k 1)", "df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training])", "80k (40k 0, 40k 1) test: 20k (10k 0, 10k", "df_training['label'].tolist() labels_test = df_test['label'].tolist() return 
sentences_train, sentences_test, labels_train, labels_test def", "= df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test =", "################################## def test_4(df, seed=0): \"\"\" training: balanced; test: balanced training:", "sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test", "20k (4k 0, 16k 1) \"\"\" df_ones = df[df['label'] ==", "labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test ################################## ##", "sentences_test, labels_train, labels_test def test_2(df, seed=0): \"\"\"training: balanced; test: unbalanced", "df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training,", "80k (40k 0, 40k 1) test: 20k (4k 0, 16k", "labels_train, labels_test def test_6(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training:", "df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training])", "== 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)", "def test_2(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 80k (40k", "test_2(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 80k (40k 0,", "0) test: 20k (4k 1, 16k 0) \"\"\" df_ones =", "51620 0) test: 14.5k (1595 1, 12905 0) \"\"\" df_ones", "(16k 1, 64k 0) test: 20k (4k 1, 16k 0)", "= df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = 
df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595]", "1, 64k 0) test: 20k (4k 1, 16k 0) \"\"\"", "= df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test =", "df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_6(df,", "0) \"\"\" df_ones = df[df['label'] == 1] df_zeros = df[df['label']", "df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training", "df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training,", "labels_test def test_2(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 80k", "df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test ################################## ## Tests on", "numpy as np import math def test_1(df, seed=0): \"\"\"training: balanced;", "(1595 1, 12905 0) \"\"\" df_ones = df[df['label'] == 1]", "test_1(df, seed=0): \"\"\"training: balanced; test: balanced training: 80k (40k 0,", "64k 0) test: 20k (4k 1, 16k 0) \"\"\" df_ones", "df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test =", "14.5k (7.25k 0, 7.25k 1) \"\"\" df_ones = df[df['label'] ==", "df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000]", "test: 20k (4k 1, 16k 0) \"\"\" df_ones = 
df[df['label']", "def test_4(df, seed=0): \"\"\" training: balanced; test: balanced training: 58k", "= df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test", "df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000]", "df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test =", "df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "old dataset ################################## def test_4(df, seed=0): \"\"\" training: balanced; test:", "0, 10k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "= df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000]", "return sentences_train, sentences_test, labels_train, labels_test def test_3(df, seed=0): \"\"\"training: unbalanced;", "dataset ################################## def test_4(df, seed=0): \"\"\" training: balanced; test: balanced", "sentences_train, sentences_test, labels_train, labels_test ################################## ## Tests on old dataset", "0, 16k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training =", "de preprocessamento de testes individuais \"\"\" import pandas as pd", "= df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "\"\"\" df_ones = df[df['label'] == 1] df_zeros = df[df['label'] ==", "math def test_1(df, seed=0): 
\"\"\"training: balanced; test: balanced training: 80k", "seed=0): \"\"\"training: unbalanced; test: unbalanced training: 80k (16k 1, 64k", "df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test =", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250]", "training: 80k (40k 0, 40k 1) test: 20k (10k 0,", "testes individuais \"\"\" import pandas as pd import numpy as", "= pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test", "df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "test: 14.5k (7.25k 0, 7.25k 1) \"\"\" df_ones = df[df['label']", "= df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000]", "= df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test", "df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training", "seed=0): \"\"\"training: balanced; test: unbalanced training: 80k (40k 0, 40k", "16k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "test_4(df, seed=0): \"\"\" training: balanced; test: balanced training: 58k (29k", "unbalanced training: 80k (16k 1, 64k 0) test: 20k (4k", "df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = 
df_ones.loc[:16000] df_zeros_training", "random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training =", "0, 29k 1) test: 14.5k (7.25k 0, 7.25k 1) \"\"\"", "return sentences_train, sentences_test, labels_train, labels_test def test_6(df, seed=0): \"\"\"training: unbalanced;", "df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "= df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test", "df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test ##################################", "import pandas as pd import numpy as np import math", "df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test,", "sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return", "(10k 0, 10k 1) \"\"\" df_ones = df[df['label'] == 1]", "labels_test def test_5(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 58k", "(4k 1, 16k 0) \"\"\" df_ones = df[df['label'] == 1]", "= df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training =", "(7.25k 0, 7.25k 1) \"\"\" df_ones = df[df['label'] == 1]", "= df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000]", "(29k 0, 29k 1) test: 14.5k (7.25k 0, 
7.25k 1)", "(40k 0, 40k 1) test: 20k (4k 0, 16k 1)", "0, 29000 1) test: 14.5k (12905 0, 1595 1) \"\"\"", "df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True)", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000]", "unbalanced training: 58k (29000 0, 29000 1) test: 14.5k (12905", "sentences_train, sentences_test, labels_train, labels_test def test_5(df, seed=0): \"\"\"training: balanced; test:", "df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training", "0) test: 14.5k (1595 1, 12905 0) \"\"\" df_ones =", "df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_3(df, seed=0): \"\"\"training:", "training: 80k (40k 0, 40k 1) test: 20k (4k 0,", "20k (10k 0, 10k 1) \"\"\" df_ones = df[df['label'] ==", "seed=0): \"\"\"training: balanced; test: unbalanced training: 58k (29000 0, 29000", "= df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test =", "1595 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test", "df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist()", "test: unbalanced training: 58k (6380 1, 51620 0) test: 14.5k", "40k 1) test: 20k (4k 0, 16k 1) \"\"\" df_ones", "seed=0): \"\"\" training: balanced; test: balanced training: 58k (29k 0,", "= df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test", 
"random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training =", "= df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_5(df, seed=0):", "= df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test", "labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_3(df,", "sentences_train, sentences_test, labels_train, labels_test def test_3(df, seed=0): \"\"\"training: unbalanced; test:", "= df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones", "\"\"\"training: unbalanced; test: unbalanced training: 80k (16k 1, 64k 0)", "labels_train, labels_test def test_5(df, seed=0): \"\"\"training: balanced; test: unbalanced training:", "\"\"\"Métodos de preprocessamento de testes individuais \"\"\" import pandas as", "0, 1595 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "test_6(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 58k (6380 1,", "1) \"\"\" df_ones = df[df['label'] == 1] df_zeros = df[df['label']", "df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training", "= df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training =", "sentences_test, labels_train, labels_test def test_5(df, seed=0): \"\"\"training: balanced; test: unbalanced", "(12905 0, 1595 1) \"\"\" df_ones = df[df['label'] == 1]", "Tests on old dataset ################################## def test_4(df, seed=0): \"\"\" training:", "7.25k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = 
pd.concat([df_ones_test, df_zeros_test]) df_test", "df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000]", "df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training,", "pandas as pd import numpy as np import math def", "= df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test ################################## ## Tests", "\"\"\" import pandas as pd import numpy as np import", "(6380 1, 51620 0) test: 14.5k (1595 1, 12905 0)", "= df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "sentences_test, labels_train, labels_test def test_6(df, seed=0): \"\"\"training: unbalanced; test: unbalanced", "import numpy as np import math def test_1(df, seed=0): \"\"\"training:", "df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training,", "= df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "58k (29000 0, 29000 1) test: 14.5k (12905 0, 1595", "1, 12905 0) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "0, 7.25k 1) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "= df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros =", "1, 16k 0) \"\"\" df_ones = df[df['label'] == 1] df_zeros", "test: 14.5k (1595 1, 12905 0) \"\"\" df_ones = df[df['label']", "= df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test", "= df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = 
df_training.sample(frac=1).reset_index(drop=True) df_test", "df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "29000 1) test: 14.5k (12905 0, 1595 1) \"\"\" df_ones", "test_3(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 80k (16k 1,", "df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test,", "df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620]", "balanced; test: balanced training: 58k (29k 0, 29k 1) test:", "= df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_6(df, seed=0):", "1) test: 20k (4k 0, 16k 1) \"\"\" df_ones =", "return sentences_train, sentences_test, labels_train, labels_test def test_5(df, seed=0): \"\"\"training: balanced;", "df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000]", "test: balanced training: 58k (29k 0, 29k 1) test: 14.5k", "df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training])", "training: 58k (6380 1, 51620 0) test: 14.5k (1595 1,", "= df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test", "random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training =", "1, 51620 0) test: 14.5k (1595 1, 12905 0) \"\"\"", "pd 
import numpy as np import math def test_1(df, seed=0):", "return sentences_train, sentences_test, labels_train, labels_test ################################## ## Tests on old", "on old dataset ################################## def test_4(df, seed=0): \"\"\" training: balanced;", "def test_5(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 58k (29000", "df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test =", "= df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train =", "unbalanced; test: unbalanced training: 80k (16k 1, 64k 0) test:", "df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test", "test: 20k (4k 0, 16k 1) \"\"\" df_ones = df[df['label']", "df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training,", "16k 0) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training =", "labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_2(df,", "12905 0) \"\"\" df_ones = df[df['label'] == 1] df_zeros =", "1) test: 20k (10k 0, 10k 1) \"\"\" df_ones =", "0, 40k 1) test: 20k (10k 0, 10k 1) \"\"\"", "(40k 0, 40k 1) test: 20k (10k 0, 10k 1)", "df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training", "labels_test def test_6(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 58k", "df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = 
df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist()", "individuais \"\"\" import pandas as pd import numpy as np", "df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train", "df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training", "\"\"\"training: balanced; test: unbalanced training: 80k (40k 0, 40k 1)", "\"\"\"training: balanced; test: unbalanced training: 58k (29000 0, 29000 1)", "df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "seed=0): \"\"\"training: unbalanced; test: unbalanced training: 58k (6380 1, 51620", "test: unbalanced training: 80k (40k 0, 40k 1) test: 20k", "= df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test =", "sentences_train, sentences_test, labels_train, labels_test def test_6(df, seed=0): \"\"\"training: unbalanced; test:", "## Tests on old dataset ################################## def test_4(df, seed=0): \"\"\"", "training: 58k (29k 0, 29k 1) test: 14.5k (7.25k 0,", "unbalanced; test: unbalanced training: 58k (6380 1, 51620 0) test:", "14.5k (1595 1, 12905 0) \"\"\" df_ones = df[df['label'] ==", "= df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test", "sentences_train, sentences_test, labels_train, labels_test def test_2(df, seed=0): \"\"\"training: balanced; test:", "1) test: 14.5k (7.25k 0, 7.25k 1) \"\"\" df_ones =", "sentences_test, labels_train, labels_test def test_3(df, seed=0): \"\"\"training: unbalanced; test: unbalanced", "0, 40k 1) test: 20k (4k 0, 16k 1) \"\"\"", "\"\"\"training: unbalanced; test: unbalanced training: 
58k (6380 1, 51620 0)", "df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist()", "df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training,", "df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test", "= df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train,", "= df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training", "(29000 0, 29000 1) test: 14.5k (12905 0, 1595 1)", "df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training])", "= df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test =", "= df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training =", "def test_3(df, seed=0): \"\"\"training: unbalanced; test: unbalanced training: 80k (16k", "labels_test ################################## ## Tests on old dataset ################################## def test_4(df,", "df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test =", "df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train =", "df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training])", "test: unbalanced training: 58k (29000 0, 29000 1) test: 14.5k", 
"df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000]", "labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_5(df,", "df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist()", "== 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1,", "df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1,", "balanced training: 80k (40k 0, 40k 1) test: 20k (10k", "= df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training =", "= df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_2(df, seed=0):", "= df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380]", "df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training", "80k (16k 1, 64k 0) test: 20k (4k 1, 16k", "def test_1(df, seed=0): \"\"\"training: balanced; test: balanced training: 80k (40k", "\"\"\" training: balanced; test: balanced training: 58k (29k 0, 29k", "df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_2(df, seed=0): \"\"\"training:", "de testes individuais \"\"\" import pandas as pd import numpy", "1) test: 14.5k (12905 0, 1595 1) \"\"\" df_ones =", "df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test", "test: balanced training: 80k (40k 0, 40k 1) test: 20k", "df_test['label'].tolist() return sentences_train, sentences_test, 
labels_train, labels_test def test_6(df, seed=0): \"\"\"training:", "random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975]", "balanced; test: unbalanced training: 80k (40k 0, 40k 1) test:", "df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test =", "sentences_test, labels_train, labels_test ################################## ## Tests on old dataset ##################################", "58k (6380 1, 51620 0) test: 14.5k (1595 1, 12905", "import math def test_1(df, seed=0): \"\"\"training: balanced; test: balanced training:", "test_5(df, seed=0): \"\"\"training: balanced; test: unbalanced training: 58k (29000 0,", "as np import math def test_1(df, seed=0): \"\"\"training: balanced; test:" ]
[ "= _constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound =", "= _constants.iT_min iT_max = _constants.iT_max iP_max = _constants.iP_max iP_min =", "iT_freeze = _constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500", "= _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS =", "_constants.iODP iPhase = _constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID", "= _constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500 =", "_constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient", "= _constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500 = _constants.iGWP500 iFH =", "DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA", "_constants.iHmolar iSmolar = _constants.iSmolar iCpmolar = _constants.iCpmolar iCp0molar = _constants.iCp0molar", "OF THIS FILE! from __future__ import absolute_import from . 
import", "_constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS", "= _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS =", "= _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial =", "= _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS =", "_constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS", "_constants.iP_min idipole_moment = _constants.idipole_moment iT = _constants.iT iP = _constants.iP", "= _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing =", "= _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES =", "_constants.iGWP100 iGWP500 = _constants.iGWP500 iFH = _constants.iFH iHH = _constants.iHH", "_constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0", "_constants.iDelta iDmolar = _constants.iDmolar iHmolar = _constants.iHmolar iSmolar = _constants.iSmolar", "script in wrappers/Python. 
# DO NOT MODIFY THE CONTENTS OF", "HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS", "= _constants.iPH iODP = _constants.iODP iPhase = _constants.iPhase iundefined_parameter =", "_constants.iDmolar iHmolar = _constants.iHmolar iSmolar = _constants.iSmolar iCpmolar = _constants.iCpmolar", "iGmass = _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity", "= _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical =", "_constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS", "_constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH", "iphase_liquid = _constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid", "_constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical", "HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH", "= _constants.iCvmass iUmass = _constants.iUmass iGmass = _constants.iGmass iHelmholtzmass =", "iP_reducing = _constants.iP_reducing iT_triple = _constants.iT_triple iP_triple = _constants.iP_triple iT_min", "iT_max = _constants.iT_max iP_max = _constants.iP_max iP_min = _constants.iP_min idipole_moment", "iBvirial = _constants.iBvirial iCvirial = 
_constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT", "= _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH =", "_constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS", "_constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS", "= _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS =", "_constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor = _constants.iacentric_factor", "PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS", "= _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH =", "= _constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100 =", "_constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS", "= _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON =", "= _constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar =", "_constants.iDmass iHmass = 
_constants.iHmass iSmass = _constants.iSmass iCpmass = _constants.iCpmass", "= _constants.idipole_moment iT = _constants.iT iP = _constants.iP iQ =", "_constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION", "SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS", "_constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS", "iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta", "= _constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS =", "NOT MODIFY THE CONTENTS OF THIS FILE! from __future__ import", "FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas", "the generate_constants_module.py script in wrappers/Python. 
# DO NOT MODIFY THE", "= _constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed =", "= _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY =", "SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH", "irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical", "= _constants.iQ iTau = _constants.iTau iDelta = _constants.iDelta iDmolar =", "iSmolar = _constants.iSmolar iCpmolar = _constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar", "_constants.iCp0mass iCvmass = _constants.iCvmass iUmass = _constants.iUmass iGmass = _constants.iGmass", "= _constants.ifraction_min ifraction_max = _constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20 =", "DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS", "_constants.idipole_moment iT = _constants.iT iP = _constants.iP iQ = _constants.iQ", "= _constants.iDelta iDmolar = _constants.iDmolar iHmolar = _constants.iHmolar iSmolar =", "_constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau", "ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS", "# This file is automatically generated by the generate_constants_module.py script", 
"_constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point", "_constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing = _constants.iP_reducing", "= _constants.iODP iPhase = _constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID =", "_constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS", "= _constants.iBvirial iCvirial = _constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT =", "PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS", "= _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES =", "= _constants.iSmolar_residual iDmass = _constants.iDmass iHmass = _constants.iHmass iSmass =", "PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA", "_constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass = _constants.iDmass", "= _constants.iP_reducing iT_triple = _constants.iT_triple iP_triple = _constants.iP_triple iT_min =", "automatically generated by the generate_constants_module.py script in wrappers/Python. 
# DO", "INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor", "ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH", "INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS", "= _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED =", "_constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple = _constants.iT_triple iP_triple = _constants.iP_triple", "_constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT iZ = _constants.iZ", "PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS", "= _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB =", "_constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY", "_constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar = _constants.iUmolar", "_constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial = _constants.iCvirial idBvirial_dT = _constants.idBvirial_dT", "_constants.iBvirial iCvirial = _constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT", "ifraction_min = _constants.ifraction_min ifraction_max = 
_constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20", "= _constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing =", "iP = _constants.iP iQ = _constants.iQ iTau = _constants.iTau iDelta", "= _constants.iFH iHH = _constants.iHH iPH = _constants.iPH iODP =", "_constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas = _constants.iphase_gas", "= _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS =", "= _constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor =", "= _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE =", "_constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS", "__future__ import absolute_import from . 
import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER", "igas_constant = _constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing", "= _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS =", "_constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE", "= _constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple =", "_constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA", "REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB", "irhomass_reducing = _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing", "iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point", "_constants.iP_max iP_min = _constants.iP_min idipole_moment = _constants.idipole_moment iT = _constants.iT", "iphase_unknown = _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK", "idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta 
idalpha0_ddelta_consttau", "_constants.iT_min iT_max = _constants.iT_max iP_max = _constants.iP_max iP_min = _constants.iP_min", "iT_min = _constants.iT_min iT_max = _constants.iT_max iP_max = _constants.iP_max iP_min", "= _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS =", "_constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH", "iHH = _constants.iHH iPH = _constants.iPH iODP = _constants.iODP iPhase", "= _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY =", "= _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing =", "= _constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar =", "_constants.iSmolar_residual iDmass = _constants.iDmass iHmass = _constants.iHmass iSmass = _constants.iSmass", "_constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics", "= _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS =", "_constants.iQ iTau = _constants.iTau iDelta = _constants.iDelta iDmolar = _constants.iDmolar", "DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS", "= _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS =", 
"HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS", "iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS", "imolar_mass = _constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical", "_constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS", "= _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE =", "iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED", "QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS", "= _constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS =", "HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS", "_constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY FLOAT_PUNCTUATION = _constants.FLOAT_PUNCTUATION", "_constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED", "FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = 
_constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid", "_constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION", "= _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH =", "= _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS =", "in wrappers/Python. # DO NOT MODIFY THE CONTENTS OF THIS", "= _constants.iviscosity iconductivity = _constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl =", "_constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS", "_constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS", "THE CONTENTS OF THIS FILE! 
from __future__ import absolute_import from", "_constants.iTau iDelta = _constants.iDelta iDmolar = _constants.iDmolar iHmolar = _constants.iHmolar", "iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas", "_constants.iCvmolar iUmolar = _constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar", "REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES", "_constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION", "iDelta = _constants.iDelta iDmolar = _constants.iDmolar iHmolar = _constants.iHmolar iSmolar", "_constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing", "CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH", "= _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 =", "_constants.iHH iPH = _constants.iPH iODP = _constants.iODP iPhase = _constants.iPhase", "= _constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass = _constants.iDmass iHmass =", "generate_constants_module.py script in wrappers/Python. 
# DO NOT MODIFY THE CONTENTS", "_constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS", "_constants.iP_triple iT_min = _constants.iT_min iT_max = _constants.iT_max iP_max = _constants.iP_max", "_constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES", "is automatically generated by the generate_constants_module.py script in wrappers/Python. #", "THIS FILE! from __future__ import absolute_import from . import _constants", "_constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid", "= _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS =", "iphase_critical_point = _constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown", "_constants.iP_reducing iT_triple = _constants.iT_triple iP_triple = _constants.iP_triple iT_min = _constants.iT_min", "_constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau", "idBvirial_dT = _constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT iZ = _constants.iZ iPIP", "_constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS", "= _constants.iphase_supercritical 
iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point =", "= _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS =", "CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY", "= _constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity = _constants.iconductivity isurface_tension =", "= _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP =", "_constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE", "from . import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant = _constants.igas_constant", "_constants.iT_max iP_max = _constants.iP_max iP_min = _constants.iP_min idipole_moment = _constants.idipole_moment", "ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS", "= _constants.iHmolar iSmolar = _constants.iSmolar iCpmolar = _constants.iCpmolar iCp0molar =", "= _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA =", "= _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID 
FLUID_TYPE_INCOMPRESSIBLE_SOLUTION =", "iT_reducing = _constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical", "_constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS", "= _constants.iZ iPIP = _constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max =", "DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE", "= _constants.iHH iPH = _constants.iPH iODP = _constants.iODP iPhase =", "= _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION =", "_constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100 = _constants.iGWP100", "_constants.iSmolar iCpmolar = _constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar = _constants.iCvmolar", "HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS", "= _constants.igas_constant imolar_mass = _constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing =", "_constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES", "iGmolar = _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass", "_constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = 
_constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS", "_constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS", "iDmass = _constants.iDmass iHmass = _constants.iHmass iSmass = _constants.iSmass iCpmass", "_constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS", "USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY FLOAT_PUNCTUATION", "DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS", "= _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS =", "_constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical = _constants.iT_critical", "_constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass = _constants.iCvmass iUmass = _constants.iUmass", "iGWP20 = _constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500 = _constants.iGWP500 iFH", "wrappers/Python. 
# DO NOT MODIFY THE CONTENTS OF THIS FILE!", "= _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass =", "_constants.iZ iPIP = _constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max = _constants.ifraction_max", "idipole_moment = _constants.idipole_moment iT = _constants.iT iP = _constants.iP iQ", "= _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS =", "HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS", "= _constants.iTau iDelta = _constants.iDelta iDmolar = _constants.iDmolar iHmolar =", "isurface_tension = _constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility", "_constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS", "= _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE =", "= _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS =", "= _constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple = _constants.iT_triple iP_triple =", "= _constants.iSmass iCpmass = _constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass =", "ifraction_max = _constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100", "= _constants.iT_max iP_max = _constants.iP_max iP_min = _constants.iP_min idipole_moment =", "_constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH 
SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA", "= _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial = _constants.iCvirial idBvirial_dT =", "= _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI =", "= _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA =", "= _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS =", "= _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS =", "OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE", "_constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase = _constants.iphase_twophase", "_constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS", "iPrandtl = _constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient", "idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial", "= _constants.iDmolar iHmolar = _constants.iHmolar iSmolar = _constants.iSmolar iCpmolar =", "= _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta = 
_constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau =", "import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass =", "ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau", "iSmolar_residual = _constants.iSmolar_residual iDmass = _constants.iDmass iHmass = _constants.iHmass iSmass", "iCvirial = _constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT iZ", "= _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS =", "_constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial = _constants.iCvirial", "_constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical", "= _constants.iCp0mass iCvmass = _constants.iCvmass iUmass = _constants.iUmass iGmass =", "DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS", "_constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical", "= _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas =", "DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS", "= _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS = 
_constants.HmassSmass_INPUTS HmolarSmolar_INPUTS =", "_constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500 = _constants.iGWP500 iFH = _constants.iFH", "_constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS", "DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS", "= _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION =", "= _constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT iZ = _constants.iZ iPIP =", "_constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE", "REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS", "irhomolar_critical = _constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing", "generated by the generate_constants_module.py script in wrappers/Python. # DO NOT", "MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA", "iODP = _constants.iODP iPhase = _constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID", "# DO NOT MODIFY THE CONTENTS OF THIS FILE! 
from", "= _constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS =", "This file is automatically generated by the generate_constants_module.py script in", "iphase_twophase = _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS", "_constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK", "import absolute_import from . import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant", "by the generate_constants_module.py script in wrappers/Python. # DO NOT MODIFY", "= _constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual =", "= _constants.idCvirial_dT iZ = _constants.iZ iPIP = _constants.iPIP ifraction_min =", "= _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS =", "REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON", "DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS", "_constants.iT iP = _constants.iP iQ = _constants.iQ iTau = _constants.iTau", "_constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing = _constants.iT_reducing", "iphase_gas = _constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed", "= _constants.iSmolar iCpmolar = 
_constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar =", "= _constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical =", "_constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS", "iT_critical = _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical", "= _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK =", "iT = _constants.iT iP = _constants.iP iQ = _constants.iQ iTau", "= _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta =", "iacentric_factor = _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing", "_constants.iFH iHH = _constants.iHH iPH = _constants.iPH iODP = _constants.iODP", "FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION", "= _constants.iHmass iSmass = _constants.iSmass iCpmass = _constants.iCpmass iCp0mass =", "= _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS =", "NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES", "= _constants.iUmass iGmass = _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity =", "QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS = 
_constants.HmolarQ_INPUTS HmassQ_INPUTS", "= _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED =", "_constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB", "_constants.iPH iODP = _constants.iODP iPhase = _constants.iPhase iundefined_parameter = _constants.iundefined_parameter", "_constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple = _constants.iT_triple", "= _constants.iP_triple iT_min = _constants.iT_min iT_max = _constants.iT_max iP_max =", "iHmass = _constants.iHmass iSmass = _constants.iSmass iCpmass = _constants.iCpmass iCp0mass", "ialphar = _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0", "FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical", "iCpmass = _constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass = _constants.iCvmass iUmass", "_constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS", "_constants.iviscosity iconductivity = _constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl = _constants.iPrandtl", "= _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS =", "_constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual = 
_constants.iSmolar_residual", "iHelmholtzmass = _constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity = _constants.iconductivity isurface_tension", "iDmolar = _constants.iDmolar iHmolar = _constants.iHmolar iSmolar = _constants.iSmolar iCpmolar", "_constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar = _constants.iUmolar iGmolar = _constants.iGmolar", "iGWP100 = _constants.iGWP100 iGWP500 = _constants.iGWP500 iFH = _constants.iFH iHH", "iCp0mass = _constants.iCp0mass iCvmass = _constants.iCvmass iUmass = _constants.iUmass iGmass", "from __future__ import absolute_import from . import _constants INVALID_PARAMETER =", "= _constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS =", "= _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS =", "_constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS", "= _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS =", "= _constants.iGWP500 iFH = _constants.iFH iHH = _constants.iHH iPH =", "_constants.iSmass iCpmass = _constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass = _constants.iCvmass", "_constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta", "QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS", "= _constants.iP iQ = _constants.iQ iTau = _constants.iTau iDelta =", "file is automatically generated by the generate_constants_module.py script in wrappers/Python.", "SmolarT_INPUTS = _constants.SmolarT_INPUTS 
SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS", "= _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS =", "_constants.iP iQ = _constants.iQ iTau = _constants.iTau iDelta = _constants.iDelta", "DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS", "= _constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS =", "_constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity = _constants.iconductivity", "SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS", "_constants.iGWP500 iFH = _constants.iFH iHH = _constants.iHH iPH = _constants.iPH", "iT_triple = _constants.iT_triple iP_triple = _constants.iP_triple iT_min = _constants.iT_min iT_max", "_constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown", "FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED", "PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS", "_constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas", "= _constants.iphase_liquid iphase_supercritical = 
_constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid =", "= _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase =", "_constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON", "_constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass = _constants.iDmass iHmass = _constants.iHmass", "_constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS QSmolar_INPUTS = _constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS", "= _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical =", "absolute_import from . 
import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant =", "DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP", "iCp0molar = _constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar = _constants.iUmolar iGmolar", "iPH = _constants.iPH iODP = _constants.iODP iPhase = _constants.iPhase iundefined_parameter", "= _constants.iCvmolar iUmolar = _constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar =", "HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS", "= _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta =", "_constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS = _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS", "= _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY FLOAT_PUNCTUATION =", "_constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS", "ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG", "idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta", "_constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity = 
_constants.iconductivity isurface_tension = _constants.isurface_tension", "_constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS", "HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS", "iSmass = _constants.iSmass iCpmass = _constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass", "_constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG = _constants.REFPROP_USE_GERG", "_constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max = _constants.ifraction_max iT_freeze = _constants.iT_freeze", "_constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS", "= _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed NORMALIZE_GAS_CONSTANTS =", "= _constants.iT_triple iP_triple = _constants.iP_triple iT_min = _constants.iT_min iT_max =", "iUmolar = _constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual", "FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID", "_constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS", "= _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient = 
_constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics =", "= _constants.DmolarP_INPUTS HmassP_INPUTS = _constants.HmassP_INPUTS HmolarP_INPUTS = _constants.HmolarP_INPUTS PSmass_INPUTS =", "iHmolar = _constants.iHmolar iSmolar = _constants.iSmolar iCpmolar = _constants.iCpmolar iCp0molar", "idCvirial_dT = _constants.idCvirial_dT iZ = _constants.iZ iPIP = _constants.iPIP ifraction_min", "iGWP500 = _constants.iGWP500 iFH = _constants.iFH iHH = _constants.iHH iPH", "iFH = _constants.iFH iHH = _constants.iHH iPH = _constants.iPH iODP", "SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS", "_constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS = _constants.QT_INPUTS PQ_INPUTS = _constants.PQ_INPUTS", "= _constants.iGWP100 iGWP500 = _constants.iGWP500 iFH = _constants.iFH iHH =", "= _constants.iT iP = _constants.iP iQ = _constants.iQ iTau =", "_constants.iCvmass iUmass = _constants.iUmass iGmass = _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass", "PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS", "_constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown = _constants.iphase_unknown iphase_not_imposed = _constants.iphase_not_imposed", "_constants.ifraction_min ifraction_max = _constants.ifraction_max iT_freeze = _constants.iT_freeze iGWP20 = _constants.iGWP20", "= _constants.SmassT_INPUTS TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS =", "= _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS =", 
"_constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical = _constants.iT_critical irhomass_reducing = _constants.irhomass_reducing", "_constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI", "= _constants.HmolarP_INPUTS PSmass_INPUTS = _constants.PSmass_INPUTS PSmolar_INPUTS = _constants.PSmolar_INPUTS PUmass_INPUTS =", "TUmolar_INPUTS = _constants.TUmolar_INPUTS TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS", "= _constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS =", "= _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED iphase_liquid =", ". 
import _constants INVALID_PARAMETER = _constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass", "_constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS", "_constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound = _constants.ispeed_sound", "= _constants.FLUID_TYPE_UNDEFINED iphase_liquid = _constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas =", "= _constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase = _constants.iphase_twophase iphase_unknown =", "iPIP = _constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max = _constants.ifraction_max iT_freeze", "= _constants.DmolarQ_INPUTS DmassQ_INPUTS = _constants.DmassQ_INPUTS PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS =", "_constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED", "iP_critical = _constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple = _constants.iT_triple iP_triple", "= _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA =", "_constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar", "iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar", "iCvmolar = _constants.iCvmolar 
iUmolar = _constants.iUmolar iGmolar = _constants.iGmolar iHelmholtzmolar", "_constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS", "iQ = _constants.iQ iTau = _constants.iTau iDelta = _constants.iDelta iDmolar", "CONTENTS OF THIS FILE! from __future__ import absolute_import from .", "= _constants.iP_min idipole_moment = _constants.idipole_moment iT = _constants.iT iP =", "iZ = _constants.iZ iPIP = _constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max", "iP_min = _constants.iP_min idipole_moment = _constants.idipole_moment iT = _constants.iT iP", "= _constants.iDmass iHmass = _constants.iHmass iSmass = _constants.iSmass iCpmass =", "DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS", "= _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity = _constants.iviscosity iconductivity =", "_constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta", "= _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH =", "iviscosity = _constants.iviscosity iconductivity = _constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl", "iCpmolar = _constants.iCpmolar iCp0molar = _constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar", "iconductivity = _constants.iconductivity isurface_tension = _constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound", "= _constants.iCvirial idBvirial_dT = _constants.idBvirial_dT idCvirial_dT = 
_constants.idCvirial_dT iZ =", "PT_INPUTS = _constants.PT_INPUTS DmassT_INPUTS = _constants.DmassT_INPUTS DmolarT_INPUTS = _constants.DmolarT_INPUTS HmolarT_INPUTS", "_constants.iphase_liquid iphase_supercritical = _constants.iphase_supercritical iphase_supercritical_gas = _constants.iphase_supercritical_gas iphase_supercritical_liquid = _constants.iphase_supercritical_liquid", "irhomass_critical = _constants.irhomass_critical iP_critical = _constants.iP_critical iP_reducing = _constants.iP_reducing iT_triple", "= _constants.iPIP ifraction_min = _constants.ifraction_min ifraction_max = _constants.ifraction_max iT_freeze =", "iP_max = _constants.iP_max iP_min = _constants.iP_min idipole_moment = _constants.idipole_moment iT", "= _constants.imolar_mass iacentric_factor = _constants.iacentric_factor irhomolar_reducing = _constants.irhomolar_reducing irhomolar_critical =", "iHelmholtzmolar = _constants.iHelmholtzmolar iSmolar_residual = _constants.iSmolar_residual iDmass = _constants.iDmass iHmass", "= _constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility =", "_constants.idBvirial_dT idCvirial_dT = _constants.idCvirial_dT iZ = _constants.iZ iPIP = _constants.iPIP", "_constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY", "_constants.DONT_CHECK_PROPERTY_LIMITS HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA R_U_CODATA = _constants.R_U_CODATA", "iphase_supercritical_liquid = _constants.iphase_supercritical_liquid iphase_critical_point = _constants.iphase_critical_point iphase_gas = _constants.iphase_gas iphase_twophase", "= _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH 
REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS REFPROP_USE_GERG =", "MODIFY THE CONTENTS OF THIS FILE! from __future__ import absolute_import", "_constants.iT_critical irhomass_reducing = _constants.irhomass_reducing irhomass_critical = _constants.irhomass_critical iP_critical = _constants.iP_critical", "OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI", "= _constants.ifundamental_derivative_of_gas_dynamics ialphar = _constants.ialphar idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta idalphar_ddelta_consttau =", "_constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS", "DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS DmassUmass_INPUTS = _constants.DmassUmass_INPUTS DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE", "_constants.QSmolar_INPUTS QSmass_INPUTS = _constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS", "_constants.idCvirial_dT iZ = _constants.iZ iPIP = _constants.iPIP ifraction_min = _constants.ifraction_min", "= _constants.iP_max iP_min = _constants.iP_min idipole_moment = _constants.idipole_moment iT =", "TUmass_INPUTS = _constants.TUmass_INPUTS DmassP_INPUTS = _constants.DmassP_INPUTS DmolarP_INPUTS = _constants.DmolarP_INPUTS HmassP_INPUTS", "= _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS = _constants.SmassT_INPUTS TUmolar_INPUTS =", "_constants.DmolarUmolar_INPUTS FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE 
FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP", "ialpha0 = _constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial", "_constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID", "_constants.QSmass_INPUTS HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS", "= _constants.iCpmass iCp0mass = _constants.iCp0mass iCvmass = _constants.iCvmass iUmass =", "HmolarQ_INPUTS = _constants.HmolarQ_INPUTS HmassQ_INPUTS = _constants.HmassQ_INPUTS DmolarQ_INPUTS = _constants.DmolarQ_INPUTS DmassQ_INPUTS", "FILE! from __future__ import absolute_import from . import _constants INVALID_PARAMETER", "SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION OVERWRITE_BINARY_INTERACTION", "_constants.iHmass iSmass = _constants.iSmass iCpmass = _constants.iCpmass iCp0mass = _constants.iCp0mass", "_constants.HmolarSmolar_INPUTS SmassUmass_INPUTS = _constants.SmassUmass_INPUTS SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS DmassHmass_INPUTS = _constants.DmassHmass_INPUTS", "_constants.DmassHmass_INPUTS DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS DmassSmass_INPUTS = _constants.DmassSmass_INPUTS DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS", "iCvmass = _constants.iCvmass iUmass = _constants.iUmass iGmass = _constants.iGmass iHelmholtzmass", "OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE VTPR_ALWAYS_RELOAD_LIBRARY", "R_U_CODATA = _constants.R_U_CODATA VTPR_UNIFAC_PATH = 
_constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS", "iUmass = _constants.iUmass iGmass = _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity", "= _constants.HmolarT_INPUTS HmassT_INPUTS = _constants.HmassT_INPUTS SmolarT_INPUTS = _constants.SmolarT_INPUTS SmassT_INPUTS =", "= _constants.FLUID_TYPE_PURE FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP FLUID_TYPE_INCOMPRESSIBLE_LIQUID =", "_constants.isurface_tension iPrandtl = _constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility", "= _constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial =", "iPhase = _constants.iPhase iundefined_parameter = _constants.iundefined_parameter INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID QT_INPUTS", "idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial iCvirial = _constants.iCvirial idBvirial_dT", "_constants.iT_freeze iGWP20 = _constants.iGWP20 iGWP100 = _constants.iGWP100 iGWP500 = _constants.iGWP500", "_constants.CRITICAL_SPLINES_ENABLED SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH", "ispeed_sound = _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient ifundamental_derivative_of_gas_dynamics", "_constants.iUmass iGmass = _constants.iGmass iHelmholtzmass = _constants.iHelmholtzmass iviscosity = _constants.iviscosity", "= _constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS =", "_constants INVALID_PARAMETER = 
_constants.INVALID_PARAMETER igas_constant = _constants.igas_constant imolar_mass = _constants.imolar_mass", "_constants.ialpha0 idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau iBvirial = _constants.iBvirial", "_constants.ALTERNATIVE_TABLES_DIRECTORY ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH", "_constants.PSmolar_INPUTS PUmass_INPUTS = _constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS", "_constants.PUmass_INPUTS PUmolar_INPUTS = _constants.PUmolar_INPUTS HmassSmass_INPUTS = _constants.HmassSmass_INPUTS HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS", "_constants.iT_triple iP_triple = _constants.iP_triple iT_min = _constants.iT_min iT_max = _constants.iT_max", "= _constants.irhomolar_reducing irhomolar_critical = _constants.irhomolar_critical iT_reducing = _constants.iT_reducing iT_critical =", "DO NOT MODIFY THE CONTENTS OF THIS FILE! from __future__", "iP_triple = _constants.iP_triple iT_min = _constants.iT_min iT_max = _constants.iT_max iP_max", "= _constants.iCp0molar iCvmolar = _constants.iCvmolar iUmolar = _constants.iUmolar iGmolar =", "VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS OVERWRITE_DEPARTURE_FUNCTION", "iTau = _constants.iTau iDelta = _constants.iDelta iDmolar = _constants.iDmolar iHmolar", "= _constants.iPrandtl ispeed_sound = _constants.ispeed_sound iisothermal_compressibility = _constants.iisothermal_compressibility iisobaric_expansion_coefficient =" ]
[ ": Number of batches to generate (if None, then dataset_size", "a smaller n_classes or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids = []", "+= entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids)) # Randomly", "in enumerate(dataset_labels): if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] =", "in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] = [entry_id] # Subset the", "same sequence) batch_size : batch_size no explaination needed step_size :", "# Choose classes and entries labels_choosen = random.sample(self.labels_subset, self.n_classes) #", "< self.batch_size: # Randomly sample remainder labels_choosen = {l: None", "[label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples]", "< self.steps: self.count += 1 yield self._make_batch_ids() def __len__(self): return", "in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) < self.batch_size:", "random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) < self.batch_size: # Randomly sample remainder", "\"\"\" Create a balanced batch sampler for label based datasets", "to generate (if None, then dataset_size / batch_size will be", "len(self.labels_subset) >= n_classes, 'Too little labels have {} entries, choose", "= n_samples # Create a label_to_entry_ids table self.label_to_entry_ids = {}", "and entries labels_choosen = random.sample(self.labels_subset, self.n_classes) # Randomly sample n_samples", "explaination needed step_size : Number of batches to generate (if", "If batch_size > n_classes * n_samples, rest of batch will", "choosen labels for l in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples)", "no explaination needed step_size : Number of batches to generate", "of batches to 
generate (if None, then dataset_size / batch_size", "a dataset (in the same sequence) batch_size : batch_size no", "\"\"\" self.batch_size = batch_size self.steps = len(dataset_labels) // batch_size if", "class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2", "have {} entries, choose a smaller n_classes or n_samples'.format(n_samples) def", "entries from choosen labels for l in labels_choosen: batch_ids +=", "self.count < self.steps: self.count += 1 yield self._make_batch_ids() def __len__(self):", "remainder labels_choosen = {l: None for l in labels_choosen} remaining_entry_ids", "batch_size > n_classes * n_samples, rest of batch will be", "generate (if None, then dataset_size / batch_size will be used)", "batch_size self.steps = len(dataset_labels) // batch_size if steps is None", "self.batch_size) batch_ids = torch.LongTensor(batch_ids) return batch_ids def __iter__(self): self.count =", "from choosen labels for l in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l],", "more than n_samples entries self.labels_subset = [label for (label, entry_ids)", "batch_ids = torch.LongTensor(batch_ids) return batch_ids def __iter__(self): self.count = 0", "len(dataset_labels) // batch_size if steps is None else steps self.n_classes", "Number of batches to generate (if None, then dataset_size /", "is None else steps self.n_classes = n_classes self.n_samples = n_samples", "for l in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids)", "will be randomly filled \"\"\" self.batch_size = batch_size self.steps =", "self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] = [entry_id] # Subset the labels with", "label_to_entry_ids table self.label_to_entry_ids = {} for entry_id, label in enumerate(dataset_labels):", "# Create a label_to_entry_ids table self.label_to_entry_ids = {} for 
entry_id,", "enumerate(dataset_labels): if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] = [entry_id]", "= 0 while self.count < self.steps: self.count += 1 yield", "label based datasets Args dataset_labels : Labels of every entry", "smaller n_classes or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids = [] #", "per class *** If batch_size > n_classes * n_samples, rest", "entry_ids in self.label_to_entry_ids.items(): if label not in labels_choosen: remaining_entry_ids +=", "= {} for entry_id, label in enumerate(dataset_labels): if label in", "n_samples entries self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items()", "choose a smaller n_classes or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids =", "Randomly sample n_samples entries from choosen labels for l in", "def _make_batch_ids(self): batch_ids = [] # Choose classes and entries", ": Number of classes n_samples : Number of samples per", "[] for label, entry_ids in self.label_to_entry_ids.items(): if label not in", "sample n_samples entries from choosen labels for l in labels_choosen:", "if len(batch_ids) < self.batch_size: # Randomly sample remainder labels_choosen =", "batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) < self.batch_size: # Randomly", "batch_size no explaination needed step_size : Number of batches to", "labels_choosen = random.sample(self.labels_subset, self.n_classes) # Randomly sample n_samples entries from", "self.n_samples) if len(batch_ids) < self.batch_size: # Randomly sample remainder labels_choosen", "{l: None for l in labels_choosen} remaining_entry_ids = [] for", "assert len(self.labels_subset) >= n_classes, 'Too little labels have {} entries,", "classes n_samples : Number of samples per class *** If", "self.label_to_entry_ids[label] = [entry_id] # Subset the labels with more than", "a balanced batch sampler 
for label based datasets Args dataset_labels", "= [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >=", "class *** If batch_size > n_classes * n_samples, rest of", "batch will be randomly filled \"\"\" self.batch_size = batch_size self.steps", "n_samples : Number of samples per class *** If batch_size", "batch_ids def __iter__(self): self.count = 0 while self.count < self.steps:", ">= n_classes, 'Too little labels have {} entries, choose a", "random import torch.utils.data.sampler class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels, batch_size=1,", "= random.sample(self.labels_subset, self.n_classes) # Randomly sample n_samples entries from choosen", "): \"\"\" Create a balanced batch sampler for label based", "the labels with more than n_samples entries self.labels_subset = [label", "len(batch_ids)) # Randomly shuffle batch ids batch_ids = random.sample(batch_ids, self.batch_size)", "# Randomly shuffle batch ids batch_ids = random.sample(batch_ids, self.batch_size) batch_ids", "Number of samples per class *** If batch_size > n_classes", "Choose classes and entries labels_choosen = random.sample(self.labels_subset, self.n_classes) # Randomly", "[entry_id] # Subset the labels with more than n_samples entries", "random.sample(remaining_entry_ids, self.batch_size - len(batch_ids)) # Randomly shuffle batch ids batch_ids", "n_classes : Number of classes n_samples : Number of samples", "+= random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) < self.batch_size: # Randomly sample", "else steps self.n_classes = n_classes self.n_samples = n_samples # Create", "in self.label_to_entry_ids.items(): if label not in labels_choosen: remaining_entry_ids += entry_ids", "n_classes, 'Too little labels have {} entries, choose a smaller", "labels have {} entries, choose a smaller n_classes or n_samples'.format(n_samples)", "labels_choosen = {l: None for l in labels_choosen} 
remaining_entry_ids =", "randomly filled \"\"\" self.batch_size = batch_size self.steps = len(dataset_labels) //", "if label not in labels_choosen: remaining_entry_ids += entry_ids batch_ids +=", "balanced batch sampler for label based datasets Args dataset_labels :", "rest of batch will be randomly filled \"\"\" self.batch_size =", "def __init__( self, dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2 ): \"\"\"", "entries, choose a smaller n_classes or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids", ">= n_samples] assert len(self.labels_subset) >= n_classes, 'Too little labels have", "self.steps = len(dataset_labels) // batch_size if steps is None else", "None else steps self.n_classes = n_classes self.n_samples = n_samples #", "batch_size if steps is None else steps self.n_classes = n_classes", "samples per class *** If batch_size > n_classes * n_samples,", "// batch_size if steps is None else steps self.n_classes =", "random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids) return batch_ids def __iter__(self): self.count", "entry_id, label in enumerate(dataset_labels): if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else:", "dataset_labels : Labels of every entry from a dataset (in", "sequence) batch_size : batch_size no explaination needed step_size : Number", "/ batch_size will be used) n_classes : Number of classes", "if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] = [entry_id] #", "sample remainder labels_choosen = {l: None for l in labels_choosen}", "labels_choosen: remaining_entry_ids += entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids))", "shuffle batch ids batch_ids = random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids)", "if steps is None else steps self.n_classes = n_classes self.n_samples", "'Too little labels 
have {} entries, choose a smaller n_classes", "needed step_size : Number of batches to generate (if None,", "in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples] assert len(self.labels_subset) >= n_classes,", "def __iter__(self): self.count = 0 while self.count < self.steps: self.count", "datasets Args dataset_labels : Labels of every entry from a", "ids batch_ids = random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids) return batch_ids", "> n_classes * n_samples, rest of batch will be randomly", "l in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) <", "self.batch_size: # Randomly sample remainder labels_choosen = {l: None for", "self.n_classes = n_classes self.n_samples = n_samples # Create a label_to_entry_ids", "batch sampler for label based datasets Args dataset_labels : Labels", "n_samples=2 ): \"\"\" Create a balanced batch sampler for label", "len(entry_ids) >= n_samples] assert len(self.labels_subset) >= n_classes, 'Too little labels", "if len(entry_ids) >= n_samples] assert len(self.labels_subset) >= n_classes, 'Too little", "labels_choosen} remaining_entry_ids = [] for label, entry_ids in self.label_to_entry_ids.items(): if", "= random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids) return batch_ids def __iter__(self):", "{} entries, choose a smaller n_classes or n_samples'.format(n_samples) def _make_batch_ids(self):", "of batch will be randomly filled \"\"\" self.batch_size = batch_size", "for label based datasets Args dataset_labels : Labels of every", "n_samples] assert len(self.labels_subset) >= n_classes, 'Too little labels have {}", ": batch_size no explaination needed step_size : Number of batches", "for l in labels_choosen} remaining_entry_ids = [] for label, entry_ids", "for label, entry_ids in self.label_to_entry_ids.items(): if label not in labels_choosen:", "+= random.sample(remaining_entry_ids, self.batch_size - 
len(batch_ids)) # Randomly shuffle batch ids", "import random import torch.utils.data.sampler class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels,", "random.sample(self.labels_subset, self.n_classes) # Randomly sample n_samples entries from choosen labels", "batch_size will be used) n_classes : Number of classes n_samples", ": Number of samples per class *** If batch_size >", "# Subset the labels with more than n_samples entries self.labels_subset", "label not in labels_choosen: remaining_entry_ids += entry_ids batch_ids += random.sample(remaining_entry_ids,", "batch ids batch_ids = random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids) return", "from a dataset (in the same sequence) batch_size : batch_size", "self.label_to_entry_ids = {} for entry_id, label in enumerate(dataset_labels): if label", "for entry_id, label in enumerate(dataset_labels): if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id)", "self.label_to_entry_ids.items() if len(entry_ids) >= n_samples] assert len(self.labels_subset) >= n_classes, 'Too", "labels for l in labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if", "batch_ids = [] # Choose classes and entries labels_choosen =", "self.batch_size - len(batch_ids)) # Randomly shuffle batch ids batch_ids =", "Create a label_to_entry_ids table self.label_to_entry_ids = {} for entry_id, label", "self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids)", "= batch_size self.steps = len(dataset_labels) // batch_size if steps is", "__iter__(self): self.count = 0 while self.count < self.steps: self.count +=", "n_classes * n_samples, rest of batch will be randomly filled", "* n_samples, rest of batch will be randomly filled \"\"\"", "little labels have {} entries, choose a smaller n_classes or", "label, entry_ids in self.label_to_entry_ids.items(): if label not in 
labels_choosen: remaining_entry_ids", "BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2 ):", "the same sequence) batch_size : batch_size no explaination needed step_size", "steps is None else steps self.n_classes = n_classes self.n_samples =", "table self.label_to_entry_ids = {} for entry_id, label in enumerate(dataset_labels): if", "import torch.utils.data.sampler class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels, batch_size=1, steps=None,", "_make_batch_ids(self): batch_ids = [] # Choose classes and entries labels_choosen", "Randomly sample remainder labels_choosen = {l: None for l in", "Randomly shuffle batch ids batch_ids = random.sample(batch_ids, self.batch_size) batch_ids =", "step_size : Number of batches to generate (if None, then", "classes and entries labels_choosen = random.sample(self.labels_subset, self.n_classes) # Randomly sample", "batch_ids = random.sample(batch_ids, self.batch_size) batch_ids = torch.LongTensor(batch_ids) return batch_ids def", "will be used) n_classes : Number of classes n_samples :", "dataset (in the same sequence) batch_size : batch_size no explaination", "Subset the labels with more than n_samples entries self.labels_subset =", "then dataset_size / batch_size will be used) n_classes : Number", "n_classes self.n_samples = n_samples # Create a label_to_entry_ids table self.label_to_entry_ids", "n_samples # Create a label_to_entry_ids table self.label_to_entry_ids = {} for", "entry from a dataset (in the same sequence) batch_size :", "remaining_entry_ids = [] for label, entry_ids in self.label_to_entry_ids.items(): if label", "n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids = [] # Choose classes and", "used) n_classes : Number of classes n_samples : Number of", "label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: 
self.label_to_entry_ids[label] = [entry_id] # Subset", "= len(dataset_labels) // batch_size if steps is None else steps", "or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids = [] # Choose classes", "= [] for label, entry_ids in self.label_to_entry_ids.items(): if label not", "self.label_to_entry_ids.items(): if label not in labels_choosen: remaining_entry_ids += entry_ids batch_ids", "__init__( self, dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2 ): \"\"\" Create", "a label_to_entry_ids table self.label_to_entry_ids = {} for entry_id, label in", "for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples] assert", "= {l: None for l in labels_choosen} remaining_entry_ids = []", "else: self.label_to_entry_ids[label] = [entry_id] # Subset the labels with more", "n_classes or n_samples'.format(n_samples) def _make_batch_ids(self): batch_ids = [] # Choose", "- len(batch_ids)) # Randomly shuffle batch ids batch_ids = random.sample(batch_ids,", "of samples per class *** If batch_size > n_classes *", ": Labels of every entry from a dataset (in the", "labels with more than n_samples entries self.labels_subset = [label for", "dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2 ): \"\"\" Create a balanced", "of every entry from a dataset (in the same sequence)", "entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples] assert len(self.labels_subset) >=", "not in labels_choosen: remaining_entry_ids += entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size", "Labels of every entry from a dataset (in the same", "steps=None, n_classes=0, n_samples=2 ): \"\"\" Create a balanced batch sampler", "# Randomly sample remainder labels_choosen = {l: None for l", "entries labels_choosen = random.sample(self.labels_subset, self.n_classes) # Randomly sample n_samples entries", "{} for entry_id, label in enumerate(dataset_labels): if label in self.label_to_entry_ids:", 
"than n_samples entries self.labels_subset = [label for (label, entry_ids) in", "sampler for label based datasets Args dataset_labels : Labels of", "be randomly filled \"\"\" self.batch_size = batch_size self.steps = len(dataset_labels)", "self.steps: self.count += 1 yield self._make_batch_ids() def __len__(self): return self.steps", "(in the same sequence) batch_size : batch_size no explaination needed", "(if None, then dataset_size / batch_size will be used) n_classes", "labels_choosen: batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples) if len(batch_ids) < self.batch_size: #", "None, then dataset_size / batch_size will be used) n_classes :", "len(batch_ids) < self.batch_size: # Randomly sample remainder labels_choosen = {l:", "every entry from a dataset (in the same sequence) batch_size", "entries self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if", "be used) n_classes : Number of classes n_samples : Number", "steps self.n_classes = n_classes self.n_samples = n_samples # Create a", "torch.utils.data.sampler class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler): def __init__( self, dataset_labels, batch_size=1, steps=None, n_classes=0,", "batch_size : batch_size no explaination needed step_size : Number of", "l in labels_choosen} remaining_entry_ids = [] for label, entry_ids in", "torch.LongTensor(batch_ids) return batch_ids def __iter__(self): self.count = 0 while self.count", "while self.count < self.steps: self.count += 1 yield self._make_batch_ids() def", "# Randomly sample n_samples entries from choosen labels for l", "batch_size=1, steps=None, n_classes=0, n_samples=2 ): \"\"\" Create a balanced batch", "*** If batch_size > n_classes * n_samples, rest of batch", "n_samples, rest of batch will be randomly filled \"\"\" self.batch_size", "dataset_size / batch_size will be used) n_classes : Number of", "batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids)) # 
Randomly shuffle batch", "in labels_choosen} remaining_entry_ids = [] for label, entry_ids in self.label_to_entry_ids.items():", "remaining_entry_ids += entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids)) #", "= [] # Choose classes and entries labels_choosen = random.sample(self.labels_subset,", "n_samples entries from choosen labels for l in labels_choosen: batch_ids", "= [entry_id] # Subset the labels with more than n_samples", "Args dataset_labels : Labels of every entry from a dataset", "with more than n_samples entries self.labels_subset = [label for (label,", "label in enumerate(dataset_labels): if label in self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label]", "self.label_to_entry_ids: self.label_to_entry_ids[label].append(entry_id) else: self.label_to_entry_ids[label] = [entry_id] # Subset the labels", "in labels_choosen: remaining_entry_ids += entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size -", "of classes n_samples : Number of samples per class ***", "based datasets Args dataset_labels : Labels of every entry from", "self.count = 0 while self.count < self.steps: self.count += 1", "= n_classes self.n_samples = n_samples # Create a label_to_entry_ids table", "self, dataset_labels, batch_size=1, steps=None, n_classes=0, n_samples=2 ): \"\"\" Create a", "self.n_classes) # Randomly sample n_samples entries from choosen labels for", "Create a balanced batch sampler for label based datasets Args", "filled \"\"\" self.batch_size = batch_size self.steps = len(dataset_labels) // batch_size", "(label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples] assert len(self.labels_subset)", "self.batch_size = batch_size self.steps = len(dataset_labels) // batch_size if steps", "Number of classes n_samples : Number of samples per class", "n_classes=0, n_samples=2 ): \"\"\" Create a balanced batch sampler for", "[] # Choose 
classes and entries labels_choosen = random.sample(self.labels_subset, self.n_classes)", "None for l in labels_choosen} remaining_entry_ids = [] for label,", "return batch_ids def __iter__(self): self.count = 0 while self.count <", "entry_ids batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids)) # Randomly shuffle", "self.n_samples = n_samples # Create a label_to_entry_ids table self.label_to_entry_ids =", "= torch.LongTensor(batch_ids) return batch_ids def __iter__(self): self.count = 0 while", "0 while self.count < self.steps: self.count += 1 yield self._make_batch_ids()", "batches to generate (if None, then dataset_size / batch_size will" ]
[ "string containing the principal name to process :return: a string", "(nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary component (nimbus) :param normalized_principal_name: a", "specific language governing permissions and limitations under the License. Ambari", "agreements. See the NOTICE file distributed with this work for", "a string containing the principal name to process :return: a", "Unless required by applicable law or agreed to in writing,", "to the Apache Software Foundation (ASF) under one or more", "by applicable law or agreed to in writing, software distributed", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "and limitations under the License. Ambari Agent \"\"\" import re", "None if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match: bare_principal", "Foundation (ASF) under one or more contributor license agreements. See", "CONDITIONS OF ANY KIND, either express or implied. See the", "the License. Ambari Agent \"\"\" import re __all__ = [\"get_bare_principal\"]", "more contributor license agreements. 
See the NOTICE file distributed with", "Version 2.0 (the \"License\"); you may not use this file", "Ambari Agent \"\"\" import re __all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name):", "[\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM)", "writing, software distributed under the License is distributed on an", "Licensed to the Apache Software Foundation (ASF) under one or", "#!/usr/bin/env python \"\"\" Licensed to the Apache Software Foundation (ASF)", "the Apache Software Foundation (ASF) under one or more contributor", "component (nimbus) :param normalized_principal_name: a string containing the principal name", "See the NOTICE file distributed with this work for additional", "not use this file except in compliance with the License.", "NOTICE file distributed with this work for additional information regarding", "Apache License, Version 2.0 (the \"License\"); you may not use", "2.0 (the \"License\"); you may not use this file except", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "express or implied. See the License for the specific language", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary", "to you under the Apache License, Version 2.0 (the \"License\");", "in compliance with the License. 
You may obtain a copy", "the primary component (nimbus) :param normalized_principal_name: a string containing the", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "or None if not valid \"\"\" bare_principal = None if", "Agent \"\"\" import re __all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\"", "you may not use this file except in compliance with", "a string containing the primary component value or None if", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "the License. You may obtain a copy of the License", "agreed to in writing, software distributed under the License is", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "\"\"\" Licensed to the Apache Software Foundation (ASF) under one", "a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary component", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "use this file except in compliance with the License. You", "Apache Software Foundation (ASF) under one or more contributor license", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "process :return: a string containing the primary component value or", "ANY KIND, either express or implied. See the License for", "limitations under the License. Ambari Agent \"\"\" import re __all__", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "one or more contributor license agreements. See the NOTICE file", "information regarding copyright ownership. 
The ASF licenses this file to", "normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match: bare_principal = match.group(1)", "containing the principal name to process :return: a string containing", "either express or implied. See the License for the specific", "ASF licenses this file to you under the Apache License,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "under the License is distributed on an \"AS IS\" BASIS,", "\"License\"); you may not use this file except in compliance", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "with the License. You may obtain a copy of the", "The ASF licenses this file to you under the Apache", "file distributed with this work for additional information regarding copyright", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "contributor license agreements. See the NOTICE file distributed with this", "License for the specific language governing permissions and limitations under", "not valid \"\"\" bare_principal = None if normalized_principal_name: match =", "= re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match: bare_principal = match.group(1) return bare_principal", "(nimbus) :param normalized_principal_name: a string containing the principal name to", ":param normalized_principal_name: a string containing the principal name to process", "\"\"\" bare_principal = None if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name)", "if not valid \"\"\" bare_principal = None if normalized_principal_name: match", "this file except in compliance with the License. 
You may", "bare_principal = None if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if", ":return: a string containing the primary component value or None", "additional information regarding copyright ownership. The ASF licenses this file", "License. Ambari Agent \"\"\" import re __all__ = [\"get_bare_principal\"] def", "(the \"License\"); you may not use this file except in", "value or None if not valid \"\"\" bare_principal = None", "__all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given a normalized principal", "applicable law or agreed to in writing, software distributed under", "\"\"\" Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the", "def get_bare_principal(normalized_principal_name): \"\"\" Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "the specific language governing permissions and limitations under the License.", "component value or None if not valid \"\"\" bare_principal =", "None if not valid \"\"\" bare_principal = None if normalized_principal_name:", "the Apache License, Version 2.0 (the \"License\"); you may not", "file except in compliance with the License. You may obtain", "except in compliance with the License. You may obtain a", "or implied. See the License for the specific language governing", "KIND, either express or implied. See the License for the", "this work for additional information regarding copyright ownership. 
The ASF", "to in writing, software distributed under the License is distributed", "normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary component (nimbus)", "or agreed to in writing, software distributed under the License", "this file to you under the Apache License, Version 2.0", "string containing the primary component value or None if not", "law or agreed to in writing, software distributed under the", "OR CONDITIONS OF ANY KIND, either express or implied. See", "if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match: bare_principal =", "primary component value or None if not valid \"\"\" bare_principal", "compliance with the License. You may obtain a copy of", "under one or more contributor license agreements. See the NOTICE", "license agreements. See the NOTICE file distributed with this work", "principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary component (nimbus) :param", "OF ANY KIND, either express or implied. See the License", "name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the primary component (nimbus) :param normalized_principal_name:", "normalized_principal_name: a string containing the principal name to process :return:", "under the Apache License, Version 2.0 (the \"License\"); you may", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "under the License. 
Ambari Agent \"\"\" import re __all__ =", "name to process :return: a string containing the primary component", "primary component (nimbus) :param normalized_principal_name: a string containing the principal", "re __all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given a normalized", "= [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given a normalized principal name", "returns just the primary component (nimbus) :param normalized_principal_name: a string", "with this work for additional information regarding copyright ownership. The", "License, Version 2.0 (the \"License\"); you may not use this", "to process :return: a string containing the primary component value", "you under the Apache License, Version 2.0 (the \"License\"); you", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "the primary component value or None if not valid \"\"\"", "valid \"\"\" bare_principal = None if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\",", "for the specific language governing permissions and limitations under the", "See the License for the specific language governing permissions and", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "governing permissions and limitations under the License. Ambari Agent \"\"\"", "(ASF) under one or more contributor license agreements. See the", "licenses this file to you under the Apache License, Version", "work for additional information regarding copyright ownership. The ASF licenses", "ownership. The ASF licenses this file to you under the", "regarding copyright ownership. The ASF licenses this file to you", "file to you under the Apache License, Version 2.0 (the", "License. 
You may obtain a copy of the License at", "match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match: bare_principal = match.group(1) return", "for additional information regarding copyright ownership. The ASF licenses this", "containing the primary component value or None if not valid", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "in writing, software distributed under the License is distributed on", "language governing permissions and limitations under the License. Ambari Agent", "required by applicable law or agreed to in writing, software", "principal name to process :return: a string containing the primary", "implied. See the License for the specific language governing permissions", "\"\"\" import re __all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given", "import re __all__ = [\"get_bare_principal\"] def get_bare_principal(normalized_principal_name): \"\"\" Given a", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "or more contributor license agreements. See the NOTICE file distributed", "permissions and limitations under the License. Ambari Agent \"\"\" import", "the principal name to process :return: a string containing the", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "copyright ownership. 
The ASF licenses this file to you under", "the NOTICE file distributed with this work for additional information", "python \"\"\" Licensed to the Apache Software Foundation (ASF) under", "Software Foundation (ASF) under one or more contributor license agreements.", "= None if normalized_principal_name: match = re.match(r\"([^/@]+)(?:/[^@])?(?:@.*)?\", normalized_principal_name) if match:", "just the primary component (nimbus) :param normalized_principal_name: a string containing", "get_bare_principal(normalized_principal_name): \"\"\" Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just", "distributed with this work for additional information regarding copyright ownership." ]
[ "return float(n) if re.match(r\"^[0-9\\.]+$\", n) else n def to_columm(line): return", "to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\", n) else n def to_columm(line):", "k = 5 csv_k = [[] for i in range(k)]", "data: train += data score = calculate_score(train, test) scores.append(score) print(\"score", "row in rows: data.append(row[0:4]) labels.append(row[4]) return (data, labels) def calculate_score(train,", "score = calculate_score(train, test) scores.append(score) print(\"score = \", scores) print(\"avg", "labels.append(row[4]) return (data, labels) def calculate_score(train, test): train_data, train_label =", "def split(rows): data = [] labels = [] for row", "calculate_score(train, test) scores.append(score) print(\"score = \", scores) print(\"avg = \",", "train = [] for data in csv_k: if test !=", "= [] for data in csv_k: if test != data:", "random.shuffle(csv) k = 5 csv_k = [[] for i in", "from sklearn import svm, metrics import random import re def", "test): train_data, train_label = split(train) test_data, test_label = split(test) classifier", "% k].append(csv[i]) for test in csv_k: train = [] for", "rows: data.append(row[0:4]) labels.append(row[4]) return (data, labels) def calculate_score(train, test): train_data,", "train += data score = calculate_score(train, test) scores.append(score) print(\"score =", "= [] for row in rows: data.append(row[0:4]) labels.append(row[4]) return (data,", "sklearn import svm, metrics import random import re def split(rows):", "csv = list(map(to_columm, lines)) del csv[0] random.shuffle(csv) k = 5", "[[] for i in range(k)] scores = [] for i", "encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm, lines)) del csv[0] random.shuffle(csv) k =", "data score = calculate_score(train, test) scores.append(score) print(\"score = \", scores)", "in csv_k: train = [] for data in csv_k: if", "open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm, lines)) del csv[0] 
random.shuffle(csv)", "!= data: train += data score = calculate_score(train, test) scores.append(score)", "[] labels = [] for row in rows: data.append(row[0:4]) labels.append(row[4])", "[] for i in range(len(csv)): csv_k[i % k].append(csv[i]) for test", "re def split(rows): data = [] labels = [] for", "if re.match(r\"^[0-9\\.]+$\", n) else n def to_columm(line): return list(map(to_number, line.strip().split(\",\")))", "= [] for i in range(len(csv)): csv_k[i % k].append(csv[i]) for", "split(rows): data = [] labels = [] for row in", "test_label = split(test) classifier = svm.SVC() classifier.fit(train_data, train_label) predict =", "train_label = split(train) test_data, test_label = split(test) classifier = svm.SVC()", "= split(train) test_data, test_label = split(test) classifier = svm.SVC() classifier.fit(train_data,", "= [[] for i in range(k)] scores = [] for", "in range(k)] scores = [] for i in range(len(csv)): csv_k[i", "csv[0] random.shuffle(csv) k = 5 csv_k = [[] for i", "k].append(csv[i]) for test in csv_k: train = [] for data", "test) scores.append(score) print(\"score = \", scores) print(\"avg = \", sum(scores)", "train_data, train_label = split(train) test_data, test_label = split(test) classifier =", "range(k)] scores = [] for i in range(len(csv)): csv_k[i %", "def to_columm(line): return list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\")", "def to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\", n) else n def", "labels) def calculate_score(train, test): train_data, train_label = split(train) test_data, test_label", "= 5 csv_k = [[] for i in range(k)] scores", "test in csv_k: train = [] for data in csv_k:", "list(map(to_columm, lines)) del csv[0] random.shuffle(csv) k = 5 csv_k =", "n) else n def to_columm(line): return list(map(to_number, line.strip().split(\",\"))) lines =", "range(len(csv)): csv_k[i % k].append(csv[i]) for test in csv_k: train =", "split(train) test_data, 
test_label = split(test) classifier = svm.SVC() classifier.fit(train_data, train_label)", "print(\"score = \", scores) print(\"avg = \", sum(scores) / len(scores))", "float(n) if re.match(r\"^[0-9\\.]+$\", n) else n def to_columm(line): return list(map(to_number,", "for i in range(k)] scores = [] for i in", "classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\",", "[] for data in csv_k: if test != data: train", "5 csv_k = [[] for i in range(k)] scores =", "= split(test) classifier = svm.SVC() classifier.fit(train_data, train_label) predict = classifier.predict(test_data)", "predict) def to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\", n) else n", "metrics import random import re def split(rows): data = []", "scores.append(score) print(\"score = \", scores) print(\"avg = \", sum(scores) /", "import re def split(rows): data = [] labels = []", "return (data, labels) def calculate_score(train, test): train_data, train_label = split(train)", "test_data, test_label = split(test) classifier = svm.SVC() classifier.fit(train_data, train_label) predict", "else n def to_columm(line): return list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\",", "for data in csv_k: if test != data: train +=", "= calculate_score(train, test) scores.append(score) print(\"score = \", scores) print(\"avg =", "i in range(len(csv)): csv_k[i % k].append(csv[i]) for test in csv_k:", "csv_k: train = [] for data in csv_k: if test", "split(test) classifier = svm.SVC() classifier.fit(train_data, train_label) predict = classifier.predict(test_data) return", "import svm, metrics import random import re def split(rows): data", "to_columm(line): return list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv", "line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = 
list(map(to_columm, lines))", "predict = classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def to_number(n): return float(n)", "list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm,", "svm.SVC() classifier.fit(train_data, train_label) predict = classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def", "test != data: train += data score = calculate_score(train, test)", "= open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm, lines)) del csv[0]", "for row in rows: data.append(row[0:4]) labels.append(row[4]) return (data, labels) def", "classifier.fit(train_data, train_label) predict = classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def to_number(n):", "classifier = svm.SVC() classifier.fit(train_data, train_label) predict = classifier.predict(test_data) return metrics.accuracy_score(test_label,", "csv_k: if test != data: train += data score =", "def calculate_score(train, test): train_data, train_label = split(train) test_data, test_label =", "svm, metrics import random import re def split(rows): data =", "= [] labels = [] for row in rows: data.append(row[0:4])", "random import re def split(rows): data = [] labels =", "= classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def to_number(n): return float(n) if", "return metrics.accuracy_score(test_label, predict) def to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\", n)", "for i in range(len(csv)): csv_k[i % k].append(csv[i]) for test in", "data = [] labels = [] for row in rows:", "del csv[0] random.shuffle(csv) k = 5 csv_k = [[] for", "in rows: data.append(row[0:4]) labels.append(row[4]) return (data, labels) def calculate_score(train, test):", "if test != data: train += data score = calculate_score(train,", "+= data score = calculate_score(train, test) 
scores.append(score) print(\"score = \",", "[] for row in rows: data.append(row[0:4]) labels.append(row[4]) return (data, labels)", "lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm, lines)) del", "import random import re def split(rows): data = [] labels", "lines)) del csv[0] random.shuffle(csv) k = 5 csv_k = [[]", "csv_k = [[] for i in range(k)] scores = []", "n def to_columm(line): return list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\",", "\"r\", encoding=\"utf-8\").read().split(\"\\n\") csv = list(map(to_columm, lines)) del csv[0] random.shuffle(csv) k", "i in range(k)] scores = [] for i in range(len(csv)):", "scores = [] for i in range(len(csv)): csv_k[i % k].append(csv[i])", "= list(map(to_columm, lines)) del csv[0] random.shuffle(csv) k = 5 csv_k", "<filename>04/cross_validation.01.py<gh_stars>1-10 from sklearn import svm, metrics import random import re", "return list(map(to_number, line.strip().split(\",\"))) lines = open(\"iris.csv\", \"r\", encoding=\"utf-8\").read().split(\"\\n\") csv =", "in csv_k: if test != data: train += data score", "data.append(row[0:4]) labels.append(row[4]) return (data, labels) def calculate_score(train, test): train_data, train_label", "(data, labels) def calculate_score(train, test): train_data, train_label = split(train) test_data,", "csv_k[i % k].append(csv[i]) for test in csv_k: train = []", "data in csv_k: if test != data: train += data", "metrics.accuracy_score(test_label, predict) def to_number(n): return float(n) if re.match(r\"^[0-9\\.]+$\", n) else", "in range(len(csv)): csv_k[i % k].append(csv[i]) for test in csv_k: train", "calculate_score(train, test): train_data, train_label = split(train) test_data, test_label = split(test)", "= svm.SVC() classifier.fit(train_data, train_label) predict = classifier.predict(test_data) return metrics.accuracy_score(test_label, predict)", "re.match(r\"^[0-9\\.]+$\", n) else n def to_columm(line): 
return list(map(to_number, line.strip().split(\",\"))) lines", "labels = [] for row in rows: data.append(row[0:4]) labels.append(row[4]) return", "for test in csv_k: train = [] for data in", "train_label) predict = classifier.predict(test_data) return metrics.accuracy_score(test_label, predict) def to_number(n): return" ]
[ "\"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps = [ \"@junit_junit\", \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_core_2_12\" ], )", "\"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256 = \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\", srcjar_sha256 = \"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps = [", "<gh_stars>1-10 load(\"@wix_oss_infra//:import_external.bzl\", import_external = \"safe_wix_scala_maven_import_external\") def dependencies(): import_external( name =", "[ \"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\" ], ) import_external( name = \"org_specs2_specs2_core_2_12\", artifact", "= \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256 = \"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps = [ \"@junit_junit\", \"@org_scala_lang_scala_library\",", "= [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\", \"@org_specs2_specs2_fp_2_12\" ], ) import_external(", "artifact = \"org.specs2:specs2-matcher_2.12:4.8.3\", artifact_sha256 = \"aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8\", srcjar_sha256 = \"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps", "name = \"org_specs2_specs2_matcher_2_12\", artifact = \"org.specs2:specs2-matcher_2.12:4.8.3\", artifact_sha256 = \"aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8\", srcjar_sha256", "= \"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps = [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\", \"@org_specs2_specs2_fp_2_12\"", "[ \"@org_scala_lang_scala_library\" ], ) import_external( name = 
\"org_specs2_specs2_common_2_12\", artifact =", "= \"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps = [ \"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\" ], ) import_external(", "name = \"org_specs2_specs2_junit_2_12\", artifact = \"org.specs2:specs2-junit_2.12:4.8.3\", artifact_sha256 = \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256", "], ) import_external( name = \"org_specs2_specs2_core_2_12\", artifact = \"org.specs2:specs2-core_2.12:4.8.3\", artifact_sha256", "= \"org.specs2:specs2-core_2.12:4.8.3\", artifact_sha256 = \"f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063\", srcjar_sha256 = \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps =", "= \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\", srcjar_sha256 = \"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps = [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\",", "= \"org_specs2_specs2_common_2_12\", artifact = \"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256 = \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\", srcjar_sha256 =", "\"org_specs2_specs2_junit_2_12\", artifact = \"org.specs2:specs2-junit_2.12:4.8.3\", artifact_sha256 = \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256 = \"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\",", "deps = [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\", \"@org_specs2_specs2_fp_2_12\" ], )", "\"@org_specs2_specs2_common_2_12\", \"@org_specs2_specs2_matcher_2_12\" ], ) import_external( name = \"org_specs2_specs2_junit_2_12\", artifact =", "import_external = \"safe_wix_scala_maven_import_external\") def dependencies(): import_external( name = 
\"org_specs2_specs2_fp_2_12\", artifact", "\"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps = [ \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_common_2_12\", \"@org_specs2_specs2_matcher_2_12\" ], )", "], ) import_external( name = \"org_specs2_specs2_matcher_2_12\", artifact = \"org.specs2:specs2-matcher_2.12:4.8.3\", artifact_sha256", "deps = [ \"@org_scala_lang_scala_library\" ], ) import_external( name = \"org_specs2_specs2_common_2_12\",", "\"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\", \"@org_specs2_specs2_fp_2_12\" ], ) import_external( name =", "srcjar_sha256 = \"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps = [ \"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\" ], )", "= \"aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8\", srcjar_sha256 = \"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps = [ \"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\"", "import_external( name = \"org_specs2_specs2_common_2_12\", artifact = \"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256 = \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\",", "load(\"@wix_oss_infra//:import_external.bzl\", import_external = \"safe_wix_scala_maven_import_external\") def dependencies(): import_external( name = \"org_specs2_specs2_fp_2_12\",", "= [ \"@org_scala_lang_scala_library\" ], ) import_external( name = \"org_specs2_specs2_common_2_12\", artifact", "= \"org_specs2_specs2_fp_2_12\", artifact = \"org.specs2:specs2-fp_2.12:4.8.3\", artifact_sha256 = \"777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7\", srcjar_sha256 =", "\"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256 = 
\"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps = [ \"@junit_junit\", \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\",", "artifact_sha256 = \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256 = \"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps = [ \"@junit_junit\",", "srcjar_sha256 = \"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps = [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\",", "], ) import_external( name = \"org_specs2_specs2_common_2_12\", artifact = \"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256", "deps = [ \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_common_2_12\", \"@org_specs2_specs2_matcher_2_12\" ], ) import_external(", "\"org_specs2_specs2_core_2_12\", artifact = \"org.specs2:specs2-core_2.12:4.8.3\", artifact_sha256 = \"f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063\", srcjar_sha256 = \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\",", "= \"6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca\", deps = [ \"@org_scala_lang_scala_library\" ], ) import_external( name", "\"@org_specs2_specs2_matcher_2_12\" ], ) import_external( name = \"org_specs2_specs2_junit_2_12\", artifact = \"org.specs2:specs2-junit_2.12:4.8.3\",", "artifact = \"org.specs2:specs2-junit_2.12:4.8.3\", artifact_sha256 = \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\", srcjar_sha256 = \"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps", "[ \"@org_scala_lang_modules_scala_parser_combinators_2_12\", \"@org_scala_lang_modules_scala_xml_2_12\", \"@org_scala_lang_scala_library\", \"@org_scala_lang_scala_reflect\", \"@org_specs2_specs2_fp_2_12\" ], ) import_external( name", "artifact = 
\"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256 = \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\", srcjar_sha256 = \"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps", ") import_external( name = \"org_specs2_specs2_junit_2_12\", artifact = \"org.specs2:specs2-junit_2.12:4.8.3\", artifact_sha256 =", ") import_external( name = \"org_specs2_specs2_core_2_12\", artifact = \"org.specs2:specs2-core_2.12:4.8.3\", artifact_sha256 =", "\"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\" ], ) import_external( name = \"org_specs2_specs2_core_2_12\", artifact =", "\"@org_scala_lang_scala_library\" ], ) import_external( name = \"org_specs2_specs2_common_2_12\", artifact = \"org.specs2:specs2-common_2.12:4.8.3\",", "= \"org.specs2:specs2-fp_2.12:4.8.3\", artifact_sha256 = \"777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7\", srcjar_sha256 = \"6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca\", deps =", "\"f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063\", srcjar_sha256 = \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps = [ \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_common_2_12\",", "artifact_sha256 = \"aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8\", srcjar_sha256 = \"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps = [ \"@org_scala_lang_scala_library\",", "import_external( name = \"org_specs2_specs2_junit_2_12\", artifact = \"org.specs2:specs2-junit_2.12:4.8.3\", artifact_sha256 = \"5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8\",", "dependencies(): import_external( name = \"org_specs2_specs2_fp_2_12\", artifact = \"org.specs2:specs2-fp_2.12:4.8.3\", artifact_sha256 =", "artifact_sha256 = \"3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708\", srcjar_sha256 = 
\"b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3\", deps = [ \"@org_scala_lang_modules_scala_parser_combinators_2_12\",", "\"01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be\", deps = [ \"@org_scala_lang_scala_library\", \"@org_specs2_specs2_common_2_12\" ], ) import_external( name", "srcjar_sha256 = \"84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c\", deps = [ \"@junit_junit\", \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_core_2_12\"", "artifact_sha256 = \"f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063\", srcjar_sha256 = \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps = [ \"@org_scala_lang_scala_library\",", "= \"777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7\", srcjar_sha256 = \"6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca\", deps = [ \"@org_scala_lang_scala_library\" ],", "\"org.specs2:specs2-core_2.12:4.8.3\", artifact_sha256 = \"f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063\", srcjar_sha256 = \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps = [", "artifact = \"org.specs2:specs2-fp_2.12:4.8.3\", artifact_sha256 = \"777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7\", srcjar_sha256 = \"6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca\", deps", "= \"0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9\", deps = [ \"@org_scala_lang_scala_library\", \"@org_scala_sbt_test_interface\", \"@org_specs2_specs2_common_2_12\", \"@org_specs2_specs2_matcher_2_12\" ],", "artifact_sha256 = \"777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7\", srcjar_sha256 = \"6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca\", deps = [ \"@org_scala_lang_scala_library\"", ") import_external( name = \"org_specs2_specs2_common_2_12\", artifact = \"org.specs2:specs2-common_2.12:4.8.3\", artifact_sha256 
= "safe_wix_scala_maven_import_external")

# Pinned specs2 4.8.3 artifacts (Scala 2.12).  Each entry pins both the jar and
# the source jar by SHA-256; the deps lists mirror the Maven dependency graph
# (fp -> common -> matcher/core -> junit).  When bumping the specs2 version,
# update every artifact and both checksums together.
def dependencies():
    import_external(
        name = "org_specs2_specs2_fp_2_12",
        artifact = "org.specs2:specs2-fp_2.12:4.8.3",
        artifact_sha256 = "777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7",
        srcjar_sha256 = "6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca",
        deps = [
            "@org_scala_lang_scala_library"
        ],
    )

    import_external(
        name = "org_specs2_specs2_common_2_12",
        artifact = "org.specs2:specs2-common_2.12:4.8.3",
        artifact_sha256 = "3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708",
        srcjar_sha256 = "b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3",
        deps = [
            "@org_scala_lang_modules_scala_parser_combinators_2_12",
            "@org_scala_lang_modules_scala_xml_2_12",
            "@org_scala_lang_scala_library",
            "@org_scala_lang_scala_reflect",
            "@org_specs2_specs2_fp_2_12"
        ],
    )

    import_external(
        name = "org_specs2_specs2_matcher_2_12",
        artifact = "org.specs2:specs2-matcher_2.12:4.8.3",
        artifact_sha256 = "aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8",
        srcjar_sha256 = "01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be",
        deps = [
            "@org_scala_lang_scala_library",
            "@org_specs2_specs2_common_2_12"
        ],
    )

    import_external(
        name = "org_specs2_specs2_core_2_12",
        artifact = "org.specs2:specs2-core_2.12:4.8.3",
        artifact_sha256 = "f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063",
        srcjar_sha256 = "0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9",
        deps = [
            "@org_scala_lang_scala_library",
            "@org_scala_sbt_test_interface",
            "@org_specs2_specs2_common_2_12",
            "@org_specs2_specs2_matcher_2_12"
        ],
    )

    import_external(
        name = "org_specs2_specs2_junit_2_12",
        artifact = "org.specs2:specs2-junit_2.12:4.8.3",
        artifact_sha256 = "5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8",
        srcjar_sha256 = "84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c",
        deps = [
            "@junit_junit",
            "@org_scala_lang_scala_library",
            "@org_scala_sbt_test_interface",
            "@org_specs2_specs2_core_2_12"
        ],
    )
[ "d = int(input()) if a == 0 and b ==", "b / a) == (- b // a): print(- b", "a = int(input()) b = int(input()) c = int(input()) d", "0: print(\"INF\") else: if (d - b * c /", "* c / a) != 0 and (- b /", "== 0: print(\"INF\") else: if (d - b * c", "c / a) != 0 and (- b / a)", "and (- b / a) == (- b // a):", "int(input()) d = int(input()) if a == 0 and b", "int(input()) c = int(input()) d = int(input()) if a ==", "b == 0: print(\"INF\") else: if (d - b *", "!= 0 and (- b / a) == (- b", "== (- b // a): print(- b // a) else:", "(- b / a) == (- b // a): print(-", "if (d - b * c / a) != 0", "b = int(input()) c = int(input()) d = int(input()) if", "a) != 0 and (- b / a) == (-", "= int(input()) c = int(input()) d = int(input()) if a", "0 and b == 0: print(\"INF\") else: if (d -", "- b * c / a) != 0 and (-", "a == 0 and b == 0: print(\"INF\") else: if", "int(input()) if a == 0 and b == 0: print(\"INF\")", "c = int(input()) d = int(input()) if a == 0", "== 0 and b == 0: print(\"INF\") else: if (d", "(d - b * c / a) != 0 and", "else: if (d - b * c / a) !=", "and b == 0: print(\"INF\") else: if (d - b", "b * c / a) != 0 and (- b", "a) == (- b // a): print(- b // a)", "0 and (- b / a) == (- b //", "/ a) == (- b // a): print(- b //", "= int(input()) d = int(input()) if a == 0 and", "/ a) != 0 and (- b / a) ==", "if a == 0 and b == 0: print(\"INF\") else:", "print(\"INF\") else: if (d - b * c / a)", "int(input()) b = int(input()) c = int(input()) d = int(input())", "= int(input()) b = int(input()) c = int(input()) d =", "= int(input()) if a == 0 and b == 0:", "(- b // a): print(- b // a) else: print(\"NO\")" ]
[ "fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins = bins", "bins=self.bins) # Pandas outputs ranges after binning. Convert ranges to", "import Counter import numpy as np self.bins = 3 self.pd", "= Counter self.X = 0 self.Y_classes = 0 self.target =", "data is numpy, then convert it into pandas if type(target)", "\"+str(count)) print() # Finally concatenate and return as dataframe or", "def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins =", "is more than the number of samples classes_count = list(map(list,", "= X.copy() # Use qcut if balanced binning is required", "def resample(self, sampler_obj, trainX, trainY): # If classes haven't yet", "+ str(classes_count[i][0]) + \" has been merged into Class \"", "sorted(classes_count, key = lambda x: x[0]) for class_, count in", "min_n_samples=6, balanced_binning=False, verbose=2): self.bins = bins tmp = target #", "class_, count in classes_count: print(str(class_)+\": \"+str(count)) print() # Finally concatenate", "self.Y_classes = le.fit_transform(self.Y_classes) # Merge classes if number of neighbours", "# Finally, perform the re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)", "count in classes_count: print(str(class_)+\": \"+str(count)) print() # Finally concatenate and", "as a dataframe/numpy array (as per input) # It also", "3 self.pd = pd self.LabelEncoder = LabelEncoder self.Counter = Counter", "else: self.X = X.copy() # Use qcut if balanced binning", "haven't yet been created, then run the \"fit\" function if", "also merges classes as and when required def fit(self, X,", "Finally concatenate and return as dataframe or numpy # Based", "Logic for merging for i in range(len(classes_count)): if classes_count[i][1] <", "X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins = bins tmp", "type(resampled_data).__module__ == 'numpy': resampled_data = self.pd.DataFrame(resampled_data, 
columns=self.X.drop(\"classes\", axis=1).columns) # Return", "self.Y_classes # This function performs the re-sampling def resample(self, sampler_obj,", "= classes_count[i-1][0] if verbose > 0: print() # Perform label-encoding", "adds classes to each sample and returns the class list", "int: if target < 0: target = X.shape[1]+target tmp =", "or numpy # Based on what type of target was", "= self.Y_classes if type(tmp) == int: self.target = tmp else:", "= self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Pretty print if verbose", "= pd self.LabelEncoder = LabelEncoder self.Counter = Counter self.X =", "for class_, count in classes_count: print(str(class_)+\": \"+str(count)) print() # Finally", "Merge classes if number of neighbours is more than the", "= list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key = lambda x:", "the re-sampling def resample(self, sampler_obj, trainX, trainY): # If classes", "list as a dataframe/numpy array (as per input) # It", "classes_count = sorted(classes_count, key = lambda x: x[0]) mid_point =", "x[0]) for class_, count in classes_count: print(str(class_)+\": \"+str(count)) print() #", "import pandas as pd from sklearn.preprocessing import LabelEncoder from collections", "in range(X.shape[1]): if i!=target: self.X[str(i)] = X[:,i] self.X[\"target\"] = X[:,target]", "is numpy, then convert it into pandas if type(target) ==", "= X[:,target] target = \"target\" else: self.X = X.copy() #", "qcut if balanced binning is required if balanced_binning: self.Y_classes =", "run the \"fit\" function if type(self.Y_classes) == int: print(\"Error! 
Run", "self.X = 0 self.Y_classes = 0 self.target = 0 self.np", "pandas if type(target) == int: if target < 0: target", "on what type of target was sent self.X[\"classes\"] = self.Y_classes", "self.target = 0 self.np = np # This function adds", "re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ == 'numpy':", "sampler_obj, trainX, trainY): # If classes haven't yet been created,", "classes as and when required def fit(self, X, target, bins=3,", "the re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ ==", "what type of target was sent self.X[\"classes\"] = self.Y_classes if", "# Pretty print if verbose > 1: print(\"Class Distribution:\\n-------------------\") classes_count", "neighbours is more than the number of samples classes_count =", "label-encoding once again # Avoids class skipping after merging le", "= classes_count[i-1][0] if verbose > 0: print(\"INFO: Class \" +", "been merged into Class \" + str(classes_count[i-1][0]) + \" due", "import LabelEncoder from collections import Counter import numpy as np", "as and when required def fit(self, X, target, bins=3, min_n_samples=6,", "= self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs ranges after binning. 
Convert", "once again # Avoids class skipping after merging le =", "i!=target: self.X[str(i)] = X[:,i] self.X[\"target\"] = X[:,target] target = \"target\"", "> 0: print(\"INFO: Class \" + str(classes_count[i][0]) + \" has", "Finally, perform the re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX, trainY) if", "self.Counter = Counter self.X = 0 self.Y_classes = 0 self.target", "mid_point = len(classes_count) # Logic for merging for i in", "i in range(len(classes_count)): if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]]", "# If data is numpy, then convert it into pandas", "skipping after merging le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) #", "== int: if target < 0: target = X.shape[1]+target tmp", "number of samples classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count,", "precision=0) else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs ranges", "target was sent self.X[\"classes\"] = self.Y_classes if type(tmp) == int:", "= le.fit_transform(self.Y_classes) # Merge classes if number of neighbours is", "array (as per input) # It also merges classes as", "# This function adds classes to each sample and returns", "if type(self.target) == int: return resampled_data.drop(\"target\", axis=1).values, resampled_data[\"target\"].values else: return", "self.X[\"target\"] = X[:,target] target = \"target\" else: self.X = X.copy()", "to low number of samples\") classes_count[i][0] = classes_count[i-1][0] if verbose", "self.Y_classes = le.fit_transform(self.Y_classes) # Pretty print if verbose > 1:", "classes haven't yet been created, then run the \"fit\" function", "self.pd.DataFrame() for i in range(X.shape[1]): if i!=target: self.X[str(i)] = X[:,i]", "lambda x: x[0]) mid_point = len(classes_count) # Logic for merging", "for i in range(X.shape[1]): if i!=target: self.X[str(i)] = X[:,i] 
self.X[\"target\"]", "Counter import numpy as np self.bins = 3 self.pd =", "0 self.Y_classes = 0 self.target = 0 self.np = np", "type(target) == int: if target < 0: target = X.shape[1]+target", "self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Merge classes if number of", "into Class \" + str(classes_count[i-1][0]) + \" due to low", "x: x[0]) for class_, count in classes_count: print(str(class_)+\": \"+str(count)) print()", "binning is required if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)", "# Logic for merging for i in range(len(classes_count)): if classes_count[i][1]", "dataframe or numpy # Based on what type of target", "re-sampling def resample(self, sampler_obj, trainX, trainY): # If classes haven't", "print(\"INFO: Class \" + str(classes_count[i][0]) + \" has been merged", "function if type(self.Y_classes) == int: print(\"Error! Run fit method first!!\")", "sklearn.preprocessing import LabelEncoder from collections import Counter import numpy as", "if verbose > 1: print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items()))", "to classes le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Merge", "then run the \"fit\" function if type(self.Y_classes) == int: print(\"Error!", "def __init__(self): import pandas as pd from sklearn.preprocessing import LabelEncoder", "i in range(X.shape[1]): if i!=target: self.X[str(i)] = X[:,i] self.X[\"target\"] =", "it into pandas if type(target) == int: if target <", "== 'numpy': resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) # Return the", "+ str(classes_count[i-1][0]) + \" due to low number of samples\")", "the number of samples classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count =", "due to low number of samples\") classes_count[i][0] = classes_count[i-1][0] if", "been created, then 
run the \"fit\" function if type(self.Y_classes) ==", "int: self.target = tmp else: self.target = target return self.Y_classes", "\" has been merged into Class \" + str(classes_count[i-1][0]) +", "balanced binning is required if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins,", "binning. Convert ranges to classes le = self.LabelEncoder() self.Y_classes =", "target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins = bins tmp =", "(as per input) # It also merges classes as and", "self.bins = bins tmp = target # If data is", "sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ == 'numpy': resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\",", "= X.shape[1]+target tmp = target self.X = self.pd.DataFrame() for i", "trainY) if type(resampled_data).__module__ == 'numpy': resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns)", "len(classes_count) # Logic for merging for i in range(len(classes_count)): if", "self.X[str(i)] = X[:,i] self.X[\"target\"] = X[:,target] target = \"target\" else:", "\" + str(classes_count[i][0]) + \" has been merged into Class", "= self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Merge classes if number", "> 1: print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count =", "np self.bins = 3 self.pd = pd self.LabelEncoder = LabelEncoder", "resample(self, sampler_obj, trainX, trainY): # If classes haven't yet been", "of samples\") classes_count[i][0] = classes_count[i-1][0] if verbose > 0: print()", "target = X.shape[1]+target tmp = target self.X = self.pd.DataFrame() for", "X[:,target] target = \"target\" else: self.X = X.copy() # Use", "int: print(\"Error! 
Run fit method first!!\") return None # Finally,", "X[:,i] self.X[\"target\"] = X[:,target] target = \"target\" else: self.X =", "str(classes_count[i-1][0]) + \" due to low number of samples\") classes_count[i][0]", "range(X.shape[1]): if i!=target: self.X[str(i)] = X[:,i] self.X[\"target\"] = X[:,target] target", "0: target = X.shape[1]+target tmp = target self.X = self.pd.DataFrame()", "< 0: target = X.shape[1]+target tmp = target self.X =", "# This function performs the re-sampling def resample(self, sampler_obj, trainX,", "x[0]) mid_point = len(classes_count) # Logic for merging for i", "ranges after binning. Convert ranges to classes le = self.LabelEncoder()", "number of samples\") classes_count[i][0] = classes_count[i-1][0] if verbose > 0:", "of neighbours is more than the number of samples classes_count", "dataframe/numpy array (as per input) # It also merges classes", "\"fit\" function if type(self.Y_classes) == int: print(\"Error! Run fit method", "perform the re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__", "0: print(\"INFO: Class \" + str(classes_count[i][0]) + \" has been", "bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins = bins tmp = target", "= lambda x: x[0]) for class_, count in classes_count: print(str(class_)+\":", "__init__(self): import pandas as pd from sklearn.preprocessing import LabelEncoder from", "= le.fit_transform(self.Y_classes) # Pretty print if verbose > 1: print(\"Class", "classes_count = sorted(classes_count, key = lambda x: x[0]) for class_,", "int: return resampled_data.drop(\"target\", axis=1).values, resampled_data[\"target\"].values else: return resampled_data.drop(self.target, axis=1), resampled_data[self.target]", "target < 0: target = X.shape[1]+target tmp = target self.X", "else: self.target = target return self.Y_classes # This function performs", "# Avoids class skipping after merging le = self.LabelEncoder() self.Y_classes", "le = 
self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Merge classes if", "per input) # It also merges classes as and when", "Class \" + str(classes_count[i][0]) + \" has been merged into", "Class \" + str(classes_count[i-1][0]) + \" due to low number", "class skipping after merging le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes)", "self.X = self.pd.DataFrame() for i in range(X.shape[1]): if i!=target: self.X[str(i)]", "classes_count[i][0] = classes_count[i-1][0] if verbose > 0: print() # Perform", "# It also merges classes as and when required def", "self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) # Return the correct X and Y", "self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Pretty print if verbose >", "# Pandas outputs ranges after binning. Convert ranges to classes", "self.target = tmp else: self.target = target return self.Y_classes #", "Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key =", "if verbose > 0: print() # Perform label-encoding once again", "low number of samples\") classes_count[i][0] = classes_count[i-1][0] if verbose >", "If data is numpy, then convert it into pandas if", "Pandas outputs ranges after binning. 
Convert ranges to classes le", "self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key = lambda x: x[0]) mid_point", "if verbose > 0: print(\"INFO: Class \" + str(classes_count[i][0]) +", "le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Pretty print if", "a dataframe/numpy array (as per input) # It also merges", "function adds classes to each sample and returns the class", "each sample and returns the class list as a dataframe/numpy", "self.np = np # This function adds classes to each", "the class list as a dataframe/numpy array (as per input)", "from collections import Counter import numpy as np self.bins =", "LabelEncoder self.Counter = Counter self.X = 0 self.Y_classes = 0", "== int: print(\"Error! Run fit method first!!\") return None #", "if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes =", "numpy # Based on what type of target was sent", "print(\"Error! Run fit method first!!\") return None # Finally, perform", "self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key = lambda x: x[0]) for", "Counter self.X = 0 self.Y_classes = 0 self.target = 0", "self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs ranges after binning. Convert ranges", "resampled_data, _ = sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ == 'numpy': resampled_data", "after binning. 
Convert ranges to classes le = self.LabelEncoder() self.Y_classes", "if type(tmp) == int: self.target = tmp else: self.target =", "performs the re-sampling def resample(self, sampler_obj, trainX, trainY): # If", "sent self.X[\"classes\"] = self.Y_classes if type(tmp) == int: self.target =", "le.fit_transform(self.Y_classes) # Pretty print if verbose > 1: print(\"Class Distribution:\\n-------------------\")", "classes_count[i][0])[0]] = classes_count[i-1][0] if verbose > 0: print(\"INFO: Class \"", "np # This function adds classes to each sample and", "ranges to classes le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) #", "Perform label-encoding once again # Avoids class skipping after merging", "= sorted(classes_count, key = lambda x: x[0]) for class_, count", "again # Avoids class skipping after merging le = self.LabelEncoder()", "and Y if type(self.target) == int: return resampled_data.drop(\"target\", axis=1).values, resampled_data[\"target\"].values", "if target < 0: target = X.shape[1]+target tmp = target", "yet been created, then run the \"fit\" function if type(self.Y_classes)", "# Merge classes if number of neighbours is more than", "# Use qcut if balanced binning is required if balanced_binning:", "for i in range(len(classes_count)): if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes ==", "# Finally concatenate and return as dataframe or numpy #", "X and Y if type(self.target) == int: return resampled_data.drop(\"target\", axis=1).values,", "self.Y_classes if type(tmp) == int: self.target = tmp else: self.target", "returns the class list as a dataframe/numpy array (as per", "q=self.bins, precision=0) else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs", "as pd from sklearn.preprocessing import LabelEncoder from collections import Counter", "outputs ranges after binning. 
Convert ranges to classes le =", "type(tmp) == int: self.target = tmp else: self.target = target", "Convert ranges to classes le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes)", "and return as dataframe or numpy # Based on what", "return self.Y_classes # This function performs the re-sampling def resample(self,", "numpy as np self.bins = 3 self.pd = pd self.LabelEncoder", "if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0] if", "self.X[\"classes\"] = self.Y_classes if type(tmp) == int: self.target = tmp", "self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) # Pandas", "Pretty print if verbose > 1: print(\"Class Distribution:\\n-------------------\") classes_count =", "columns=self.X.drop(\"classes\", axis=1).columns) # Return the correct X and Y if", "0: print() # Perform label-encoding once again # Avoids class", "self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)", "Based on what type of target was sent self.X[\"classes\"] =", "self.target = target return self.Y_classes # This function performs the", "_ = sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ == 'numpy': resampled_data =", "has been merged into Class \" + str(classes_count[i-1][0]) + \"", "= 0 self.target = 0 self.np = np # This", "list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key = lambda x: x[0])", "then convert it into pandas if type(target) == int: if", "self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs ranges after binning.", "range(len(classes_count)): if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]", "if type(target) == int: if target < 0: target 
=", "str(classes_count[i][0]) + \" has been merged into Class \" +", "self.X = X.copy() # Use qcut if balanced binning is", "class list as a dataframe/numpy array (as per input) #", "x: x[0]) mid_point = len(classes_count) # Logic for merging for", "collections import Counter import numpy as np self.bins = 3", "if i!=target: self.X[str(i)] = X[:,i] self.X[\"target\"] = X[:,target] target =", "LabelEncoder from collections import Counter import numpy as np self.bins", "min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0] if verbose > 0:", "= self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) # Return the correct X and", "if balanced binning is required if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target],", "= np # This function adds classes to each sample", "# If classes haven't yet been created, then run the", "in range(len(classes_count)): if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] =", "in classes_count: print(str(class_)+\": \"+str(count)) print() # Finally concatenate and return", "is required if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0) else:", "print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key", "the correct X and Y if type(self.target) == int: return", "self.pd = pd self.LabelEncoder = LabelEncoder self.Counter = Counter self.X", "numpy, then convert it into pandas if type(target) == int:", "1: print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count,", "== int: self.target = tmp else: self.target = target return", "Y if type(self.target) == int: return resampled_data.drop(\"target\", axis=1).values, 
resampled_data[\"target\"].values else:", "target # If data is numpy, then convert it into", "as np self.bins = 3 self.pd = pd self.LabelEncoder =", "< min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0] if verbose >", "print(str(class_)+\": \"+str(count)) print() # Finally concatenate and return as dataframe", "import numpy as np self.bins = 3 self.pd = pd", "resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) # Return the correct X", "0 self.np = np # This function adds classes to", "else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) # Pandas outputs ranges after", "return None # Finally, perform the re-sampling resampled_data, _ =", "verbose > 0: print(\"INFO: Class \" + str(classes_count[i][0]) + \"", "== int: return resampled_data.drop(\"target\", axis=1).values, resampled_data[\"target\"].values else: return resampled_data.drop(self.target, axis=1),", "= sampler_obj.fit_resample(trainX, trainY) if type(resampled_data).__module__ == 'numpy': resampled_data = self.pd.DataFrame(resampled_data,", "Use qcut if balanced binning is required if balanced_binning: self.Y_classes", "bins tmp = target # If data is numpy, then", "= bins tmp = target # If data is numpy,", "key = lambda x: x[0]) for class_, count in classes_count:", "for merging for i in range(len(classes_count)): if classes_count[i][1] < min_n_samples:", "classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key = lambda", "+ \" due to low number of samples\") classes_count[i][0] =", "as dataframe or numpy # Based on what type of", "target return self.Y_classes # This function performs the re-sampling def", "tmp = target # If data is numpy, then convert", "type of target was sent self.X[\"classes\"] = self.Y_classes if type(tmp)", "# Based on what type of target was sent self.X[\"classes\"]", "sorted(classes_count, key = lambda x: 
x[0]) mid_point = len(classes_count) #", "classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0] if verbose", "if number of neighbours is more than the number of", "input) # It also merges classes as and when required", "classes_count[i-1][0] if verbose > 0: print() # Perform label-encoding once", "If classes haven't yet been created, then run the \"fit\"", "self.Y_classes = 0 self.target = 0 self.np = np #", "if type(resampled_data).__module__ == 'numpy': resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) #", "= len(classes_count) # Logic for merging for i in range(len(classes_count)):", "# Perform label-encoding once again # Avoids class skipping after", "axis=1).columns) # Return the correct X and Y if type(self.target)", "+ \" has been merged into Class \" + str(classes_count[i-1][0])", "number of neighbours is more than the number of samples", "classes_count[i-1][0] if verbose > 0: print(\"INFO: Class \" + str(classes_count[i][0])", "pd self.LabelEncoder = LabelEncoder self.Counter = Counter self.X = 0", "method first!!\") return None # Finally, perform the re-sampling resampled_data,", "= lambda x: x[0]) mid_point = len(classes_count) # Logic for", "verbose > 0: print() # Perform label-encoding once again #", "tmp else: self.target = target return self.Y_classes # This function", "tmp = target self.X = self.pd.DataFrame() for i in range(X.shape[1]):", "more than the number of samples classes_count = list(map(list, self.Counter(self.Y_classes).items()))", "self.LabelEncoder = LabelEncoder self.Counter = Counter self.X = 0 self.Y_classes", "samples\") classes_count[i][0] = classes_count[i-1][0] if verbose > 0: print() #", "== classes_count[i][0])[0]] = classes_count[i-1][0] if verbose > 0: print(\"INFO: Class", "class resampler: def __init__(self): import pandas as pd from sklearn.preprocessing", "the \"fit\" function if 
type(self.Y_classes) == int: print(\"Error! Run fit", "required if balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes", "merging for i in range(len(classes_count)): if classes_count[i][1] < min_n_samples: self.Y_classes[self.np.where(self.Y_classes", "balanced_binning: self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes = self.pd.cut(self.X[target],", "= self.pd.qcut(self.X[target], q=self.bins, precision=0) else: self.Y_classes = self.pd.cut(self.X[target], bins=self.bins) #", "trainY): # If classes haven't yet been created, then run", "created, then run the \"fit\" function if type(self.Y_classes) == int:", "= LabelEncoder self.Counter = Counter self.X = 0 self.Y_classes =", "self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0] if verbose > 0: print(\"INFO:", "= target # If data is numpy, then convert it", "trainX, trainY): # If classes haven't yet been created, then", "merged into Class \" + str(classes_count[i-1][0]) + \" due to", "balanced_binning=False, verbose=2): self.bins = bins tmp = target # If", "\" + str(classes_count[i-1][0]) + \" due to low number of", "= sorted(classes_count, key = lambda x: x[0]) mid_point = len(classes_count)", "was sent self.X[\"classes\"] = self.Y_classes if type(tmp) == int: self.target", "concatenate and return as dataframe or numpy # Based on", "target self.X = self.pd.DataFrame() for i in range(X.shape[1]): if i!=target:", "lambda x: x[0]) for class_, count in classes_count: print(str(class_)+\": \"+str(count))", "type(self.Y_classes) == int: print(\"Error! 
Run fit method first!!\") return None", "type(self.target) == int: return resampled_data.drop(\"target\", axis=1).values, resampled_data[\"target\"].values else: return resampled_data.drop(self.target,", "merges classes as and when required def fit(self, X, target,", "classes to each sample and returns the class list as", "le.fit_transform(self.Y_classes) # Merge classes if number of neighbours is more", "print() # Perform label-encoding once again # Avoids class skipping", "from sklearn.preprocessing import LabelEncoder from collections import Counter import numpy", "Avoids class skipping after merging le = self.LabelEncoder() self.Y_classes =", "verbose=2): self.bins = bins tmp = target # If data", "merging le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Pretty print", "pd from sklearn.preprocessing import LabelEncoder from collections import Counter import", "classes le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Merge classes", "into pandas if type(target) == int: if target < 0:", "and returns the class list as a dataframe/numpy array (as", "= 0 self.np = np # This function adds classes", "after merging le = self.LabelEncoder() self.Y_classes = le.fit_transform(self.Y_classes) # Pretty", "function performs the re-sampling def resample(self, sampler_obj, trainX, trainY): #", "print() # Finally concatenate and return as dataframe or numpy", "when required def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):", "This function performs the re-sampling def resample(self, sampler_obj, trainX, trainY):", "This function adds classes to each sample and returns the", "than the number of samples classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count", "classes_count: print(str(class_)+\": \"+str(count)) print() # Finally concatenate and return as", "sample and returns the class list as a dataframe/numpy array", "0 self.target = 0 self.np = np # This 
function", "\" due to low number of samples\") classes_count[i][0] = classes_count[i-1][0]", "= \"target\" else: self.X = X.copy() # Use qcut if", "\"target\" else: self.X = X.copy() # Use qcut if balanced", "correct X and Y if type(self.target) == int: return resampled_data.drop(\"target\",", "if type(self.Y_classes) == int: print(\"Error! Run fit method first!!\") return", "= target return self.Y_classes # This function performs the re-sampling", "convert it into pandas if type(target) == int: if target", "= self.pd.DataFrame() for i in range(X.shape[1]): if i!=target: self.X[str(i)] =", "= 0 self.Y_classes = 0 self.target = 0 self.np =", "'numpy': resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop(\"classes\", axis=1).columns) # Return the correct", "samples classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key =", "= X[:,i] self.X[\"target\"] = X[:,target] target = \"target\" else: self.X", "None # Finally, perform the re-sampling resampled_data, _ = sampler_obj.fit_resample(trainX,", "classes if number of neighbours is more than the number", "resampler: def __init__(self): import pandas as pd from sklearn.preprocessing import", "and when required def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False,", "> 0: print() # Perform label-encoding once again # Avoids", "return as dataframe or numpy # Based on what type", "self.bins = 3 self.pd = pd self.LabelEncoder = LabelEncoder self.Counter", "Run fit method first!!\") return None # Finally, perform the", "first!!\") return None # Finally, perform the re-sampling resampled_data, _", "print if verbose > 1: print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list,", "= 3 self.pd = pd self.LabelEncoder = LabelEncoder self.Counter =", "It also merges classes as and when required def fit(self,", "to each sample and returns the class list as a", "Return the correct X and Y if type(self.target) == 
int:", "fit method first!!\") return None # Finally, perform the re-sampling", "= tmp else: self.target = target return self.Y_classes # This", "of samples classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count = sorted(classes_count, key", "X.copy() # Use qcut if balanced binning is required if", "# Return the correct X and Y if type(self.target) ==", "key = lambda x: x[0]) mid_point = len(classes_count) # Logic", "of target was sent self.X[\"classes\"] = self.Y_classes if type(tmp) ==", "required def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2): self.bins", "verbose > 1: print(\"Class Distribution:\\n-------------------\") classes_count = list(map(list, self.Counter(self.Y_classes).items())) classes_count", "X.shape[1]+target tmp = target self.X = self.pd.DataFrame() for i in", "pandas as pd from sklearn.preprocessing import LabelEncoder from collections import", "target = \"target\" else: self.X = X.copy() # Use qcut", "= target self.X = self.pd.DataFrame() for i in range(X.shape[1]): if" ]
def _split_data(tensor, stratify):
    """Split ``tensor`` into 70%/15%/15% train/val/test partitions.

    Both cuts are stratified on ``stratify`` and use fixed random seeds, so
    repeated calls — and calls on different tensors of the same length with
    the same ``stratify`` — yield aligned partitions.
    """
    split = sklearn.model_selection.train_test_split
    # First cut: 70% train, 30% held out.
    train_part, holdout, _, holdout_stratify = split(
        tensor, stratify, train_size=0.7, random_state=0, shuffle=True, stratify=stratify)
    # Second cut: halve the holdout into validation and test.
    val_part, test_part = split(
        holdout, train_size=0.5, random_state=1, shuffle=True, stratify=holdout_stratify)
    return train_part, val_part, test_part
def _save_data(dir, **tensors):
    """Serialise every keyword-argument tensor to ``dir/<name>.pt``."""
    for name in tensors:
        torch.save(tensors[name], str(dir / name) + '.pt')
def download():
    """Fetch the Speech Commands v0.02 archive and extract it.

    The ``experiments/data`` directory must already exist (it is deliberately
    not created automatically so it can be a symlink to bulk storage).
    A no-op if the archive file is already present.
    """
    data_root = str(here / '../experiments/data')
    if not os.path.exists(data_root):
        raise RuntimeError("data directory does not exist. Please create a directory called 'data' in the 'experiments'"
                           " directory. (We're going to put a lot of data there, so we don't make it automatically - "
                           "thus giving you the opportunity to make it a symlink rather than a normal directory, so "
                           "that the data can be stored elsewhere if you wish.)")
    dataset_dir = data_root + '/SpeechCommands'
    archive = dataset_dir + '/speech_commands.tar.gz'
    # The archive doubles as the "already downloaded" marker.
    if os.path.exists(archive):
        return
    if not os.path.exists(dataset_dir):
        os.mkdir(dataset_dir)
    urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz', archive)
    # NOTE(review): extractall trusts member paths inside a downloaded
    # archive; consider tarfile extraction filters if hardening is needed.
    with tarfile.open(archive, 'r') as f:
        f.extractall(dataset_dir)
def _process_data():
    """Load the ten-word Speech Commands subset, compute MFCC features, and
    normalise each MFCC channel with statistics from the training split only.

    Returns:
        train/val/test MFCC tensors, train/val/test labels, the per-channel
        means and stds used for normalisation, and the raw-audio splits.
    """
    base_loc = here / '..' / 'experiments' / 'data' / 'SpeechCommands'
    # 34975 is the expected number of full-length (16000-sample) clips across
    # the ten words below — enforced by the assert after the loop.
    X = torch.empty(34975, 16000, 1)
    y = torch.empty(34975, dtype=torch.long)
    batch_index = 0
    y_index = 0  # label = position of the word in the tuple below
    for foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go'):
        loc = base_loc / foldername
        for filename in os.listdir(loc):
            audio, _ = torchaudio.load_wav(loc / filename, channels_first=False,
                                           normalization=False)  # for forward compatbility if they fix it
            # Scale 16-bit integer samples into [-1, 1).
            audio = audio / 2 ** 15  # Normalization argument doesn't seem to work so we do it manually.
            # A few samples are shorter than the full length; for simplicity we discard them.
            if len(audio) != 16000:
                continue
            X[batch_index] = audio
            y[batch_index] = y_index
            batch_index += 1
        y_index += 1
    assert batch_index == 34975, "batch_index is {}".format(batch_index)
    audio_X = X
    # X is of shape (batch=34975, length=16000, channels=1)
    X = torchaudio.transforms.MFCC(log_mels=True)(X.squeeze(-1)).transpose(1, 2).detach()
    # X is of shape (batch=34975, length=81, channels=40). For some crazy reason it requires a gradient, so detach.
    train_X, _, _ = _split_data(X, y)
    out = []
    means = []
    stds = []
    # Normalise every channel using training-split statistics only, so no
    # information leaks from the val/test splits.
    for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
        mean = train_Xi.mean()
        std = train_Xi.std()
        means.append(mean)
        stds.append(std)
        out.append((Xi - mean) / (std + 1e-5))  # 1e-5 guards against zero std
    X = torch.stack(out, dim=-1)
    # The split uses fixed seeds, so these three calls partition rows
    # identically across audio, features and labels.
    train_audio_X, val_audio_X, test_audio_X = _split_data(audio_X, y)
    train_X, val_X, test_X = _split_data(X, y)
    train_y, val_y, test_y = _split_data(y, y)
    return train_X, val_X, test_X, train_y, val_y, test_y, torch.stack(means), torch.stack(stds), train_audio_X, \
           val_audio_X, test_audio_X
def main():
    """Download, preprocess and persist the Speech Commands tensors."""
    download()
    (train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X,
     test_audio_X) = _process_data()
    out_dir = here / '..' / 'experiments' / 'data' / 'speech_commands_data'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    _save_data(out_dir,
               train_X=train_X, val_X=val_X, test_X=test_X,
               train_y=train_y, val_y=val_y, test_y=test_y,
               means=means, stds=stds,
               train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X)


if __name__ == '__main__':
    main()
/ 'experiments' / 'data' / 'speech_commands_data' if not os.path.exists(loc):", "sklearn.model_selection.train_test_split(tensor, stratify, train_size=0.7, random_state=0, shuffle=True, stratify=stratify) val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor,", "stratify=stratify) val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor, train_size=0.5, random_state=1, shuffle=True, stratify=testval_stratify) return", "torch.stack(means), torch.stack(stds), train_audio_X, \\ val_audio_X, test_audio_X def main(): download() (train_X,", "mean) / (std + 1e-5)) X = torch.stack(out, dim=-1) train_audio_X,", "tensor_name) + '.pt') def download(): base_base_loc = str(here / '../experiments/data')", "= _split_data(audio_X, y) train_X, val_X, test_X = _split_data(X, y) train_y,", "(train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X,", "**tensors): for tensor_name, tensor_value in tensors.items(): torch.save(tensor_value, str(dir / tensor_name)", "X # X is of shape (batch=34975, length=16000, channels=1) X", "loc) with tarfile.open(loc, 'r') as f: f.extractall(base_loc) def _process_data(): base_loc", "means, stds, train_audio_X, val_audio_X, test_audio_X) = _process_data() loc = here", "test_X = _split_data(X, y) train_y, val_y, test_y = _split_data(y, y)", "y) train_y, val_y, test_y = _split_data(y, y) return train_X, val_X,", "'left', 'right', 'on', 'off', 'stop', 'go'): loc = base_loc /", "test_audio_X def main(): download() (train_X, val_X, test_X, train_y, val_y, test_y,", "_save_data(loc, train_X=train_X, val_X=val_X, test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y, means=means, stds=stds, train_audio_X=train_audio_X,", "0.7/0.15/0.15 train/val/test split (train_tensor, testval_tensor, train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify,", "test_y = _split_data(y, y) return train_X, val_X, test_X, train_y, val_y,", 
"train/val/test split (train_tensor, testval_tensor, train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify, train_size=0.7,", "they fix it audio = audio / 2 ** 15", "X = torch.empty(34975, 16000, 1) y = torch.empty(34975, dtype=torch.long) batch_index", "_process_data(): base_loc = here / '..' / 'experiments' / 'data'", "'.pt') def download(): base_base_loc = str(here / '../experiments/data') if not", "= base_base_loc + '/SpeechCommands' loc = base_loc + '/speech_commands.tar.gz' if", "foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off',", "audio_X = X # X is of shape (batch=34975, length=16000,", "= train_Xi.std() means.append(mean) stds.append(std) out.append((Xi - mean) / (std +", "don't make it automatically - \" \"thus giving you the", "16000, 1) y = torch.empty(34975, dtype=torch.long) batch_index = 0 y_index", "elsewhere if you wish.)\") base_loc = base_base_loc + '/SpeechCommands' loc", "you the opportunity to make it a symlink rather than", "batch_index == 34975, \"batch_index is {}\".format(batch_index) audio_X = X #", "not os.path.exists(base_base_loc): raise RuntimeError(\"data directory does not exist. Please create", "/ 'experiments' / 'data' / 'SpeechCommands' X = torch.empty(34975, 16000,", "+= 1 assert batch_index == 34975, \"batch_index is {}\".format(batch_index) audio_X", "stratify, train_size=0.7, random_state=0, shuffle=True, stratify=stratify) val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor, train_size=0.5,", "a directory called 'data' in the 'experiments'\" \" directory. 
(We're", "= torch.empty(34975, dtype=torch.long) batch_index = 0 y_index = 0 for", "= _split_data(y, y) return train_X, val_X, test_X, train_y, val_y, test_y,", "if not os.path.exists(base_loc): os.mkdir(base_loc) urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz', loc) with tarfile.open(loc, 'r') as", "pathlib import sklearn.model_selection import tarfile import torch import torchaudio import", "'go'): loc = base_loc / foldername for filename in os.listdir(loc):", "y_index batch_index += 1 y_index += 1 assert batch_index ==", "test_audio_X = _split_data(audio_X, y) train_X, val_X, test_X = _split_data(X, y)", "rather than a normal directory, so \" \"that the data", "here / '..' / 'experiments' / 'data' / 'speech_commands_data' if", "val_audio_X, test_audio_X) = _process_data() loc = here / '..' /", "work so we do it manually. # A few samples", "pathlib.Path(__file__).resolve().parent def _split_data(tensor, stratify): # 0.7/0.15/0.15 train/val/test split (train_tensor, testval_tensor,", "for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)): mean = train_Xi.mean() std", "!= 16000: continue X[batch_index] = audio y[batch_index] = y_index batch_index", "val_audio_X, test_audio_X def main(): download() (train_X, val_X, test_X, train_y, val_y,", "fix it audio = audio / 2 ** 15 #", "str(here / '../experiments/data') if not os.path.exists(base_base_loc): raise RuntimeError(\"data directory does", "stratify=testval_stratify) return train_tensor, val_tensor, test_tensor def _save_data(dir, **tensors): for tensor_name,", "'/speech_commands.tar.gz' if os.path.exists(loc): return if not os.path.exists(base_loc): os.mkdir(base_loc) urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz', loc)", "urllib.request here = pathlib.Path(__file__).resolve().parent def _split_data(tensor, stratify): # 0.7/0.15/0.15 train/val/test", "== 34975, \"batch_index is {}\".format(batch_index) audio_X = 
X # X", "in the 'experiments'\" \" directory. (We're going to put a", "/ tensor_name) + '.pt') def download(): base_base_loc = str(here /", "create a directory called 'data' in the 'experiments'\" \" directory.", "\" \"that the data can be stored elsewhere if you", "a lot of data there, so we don't make it", "= torchaudio.load_wav(loc / filename, channels_first=False, normalization=False) # for forward compatbility", "torch.stack(stds), train_audio_X, \\ val_audio_X, test_audio_X def main(): download() (train_X, val_X,", "of data there, so we don't make it automatically -", "crazy reason it requires a gradient, so detach. train_X, _,", "train_X.unbind(dim=-1)): mean = train_Xi.mean() std = train_Xi.std() means.append(mean) stds.append(std) out.append((Xi", "loc = base_loc / foldername for filename in os.listdir(loc): audio,", "giving you the opportunity to make it a symlink rather", "val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X, test_audio_X)", "batch_index = 0 y_index = 0 for foldername in ('yes',", "train_audio_X, \\ val_audio_X, test_audio_X def main(): download() (train_X, val_X, test_X,", "+ '/speech_commands.tar.gz' if os.path.exists(loc): return if not os.path.exists(base_loc): os.mkdir(base_loc) urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',", "directory, so \" \"that the data can be stored elsewhere", "test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y, means=means, stds=stds, train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X) if", "os.path.exists(base_base_loc): raise RuntimeError(\"data directory does not exist. Please create a", "X is of shape (batch=34975, length=81, channels=40). 
For some crazy", "so \" \"that the data can be stored elsewhere if", "val_tensor, test_tensor def _save_data(dir, **tensors): for tensor_name, tensor_value in tensors.items():", "stds, train_audio_X, val_audio_X, test_audio_X) = _process_data() loc = here /" ]
[ "= True [cat.products.append(new_prod) for cat in categories] [db.session.add(cat) for cat", "else: return jsonify({\"status\": \"warning\", \"msg\": \"%d products deleted, also %d", "payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id", "as e: return '{\"status\": \"error\", \"msg\": \"field %s have not", "float(payload['rating'])}) if product.rating > 8.0: product.featured = True if 'items_in_stock'", "[cat.products.append(product_obj) for cat in categories] [db.session.add(cat) for cat in categories]", "\"msg\": \"TypeError occured: check values of fields\"}' except KeyError as", "for cat in recevied_categories: exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if", "product.update({'featured': bool(payload['featured'])}) if 'rating' in payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating", "def create_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format =", "in payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories' in payload.keys(): categories =", "if product.rating > 8.0: product.featured = True if 'items_in_stock' in", "KeyError as e: return '{\"status\": \"error\", \"msg\": \"field %s have", "json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id == payload['id'])", "product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product updated\"})", "dict) -> List[Category]: \"\"\" Func to get existing categories objects", "Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return categories @products_blueprint.route('/products', methods=['GET'])", "5: return '{\"status\": \"error\", \"msg\": \"categories number must be between", 
"featured=bool(payload['featured'] if 'featured' in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if", "in categories] db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product',", "def update_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format =", "== cat.name)).all()[0][0] if exists: existing_category = Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category)", "1 or len(payload['categories']) > 5: return '{\"status\": \"error\", \"msg\": \"categories", "% products_categories_result}) else: return jsonify({\"status\": \"warning\", \"msg\": \"%d products deleted,", "try: new_prod = Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in payload.keys()", "[cat.products.append(new_prod) for cat in categories] [db.session.add(cat) for cat in categories]", ":param p: payload of request :return: list of categories \"\"\"", "404 @products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data = request.get_data().decode('utf-8') p =", "return '{\"status\": \"error\", \"msg\": \"no product found with given id\"}',", "= json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id ==", "datetime import datetime from typing import List from flask import", "'{\"status\": \"error\", \"msg\": \"TypeError occured: check values of fields\"}' except", "Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'],", "found with given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data", "cat in categories] [db.session.add(cat) for cat in categories] if 
'expiration_date'", "\"msg\": \"field %s have not been found, but is required\"}'", "payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date' in payload.keys()) else", "except TypeError as e: return '{\"status\": \"error\", \"msg\": \"TypeError occured:", "'brand' in payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories' in payload.keys(): categories", "p = json.loads(data) products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result =", "products_categories_result}) else: return jsonify({\"status\": \"warning\", \"msg\": \"%d products deleted, also", "categories = [] for cat in recevied_categories: exists = db.session.query(db.exists().where(Category.name", "Blueprint, jsonify, request, json from app.models.products import Product, Category, products_categories", "payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories' in payload.keys(): categories = create_or_get_categories(payload)", "len(payload['categories']) < 1 or len(payload['categories']) > 5: return '{\"status\": \"error\",", "<reponame>duch94/spark_crud_test from datetime import datetime from typing import List from", "%H:%M:%S' if len(payload['categories']) < 1 or len(payload['categories']) > 5: return", "and 5\"}', 400 categories = create_or_get_categories(payload) try: new_prod = Product(name=payload['name'],", "Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit()", "'results': [p.serialized for p in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def", "('receipt_date' in payload.keys()) else None)) except TypeError as e: return", "received\"}) 
@products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data = request.get_data().decode('utf-8') payload =", "= Product.query.filter(Product.id == payload['id']) if product: if 'name' in payload.keys():", "\"no product found with given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE']) def", "methods=['PUT']) def update_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format", "return jsonify({\"status\": \"warning\", \"msg\": \"%d products deleted, also %d product_categories", "recevied_categories: exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists: existing_category =", "jsonify, request, json from app.models.products import Product, Category, products_categories from", "[] for cat in recevied_categories: exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0]", "products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj) for cat in", "len(payload['categories']) > 5: return '{\"status\": \"error\", \"msg\": \"categories number must", "in categories] [db.session.add(cat) for cat in categories] if 'expiration_date' in", "request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' if len(payload['categories'])", "('expiration_date' in payload.keys()) else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if", "from app.models.products import Product, Category, products_categories from app import db", "categories.append(cat) return categories @products_blueprint.route('/products', methods=['GET']) def get_products(): return jsonify({ 'results':", "cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return categories 
@products_blueprint.route('/products', methods=['GET']) def get_products():", "Blueprint('products', __name__) def create_or_get_categories(p: dict) -> List[Category]: \"\"\" Func to", "'receipt_date' in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in payload.keys():", "400 if new_prod.rating > 8.0: new_prod.featured = True [cat.products.append(new_prod) for", "occured: check values of fields\"}' except KeyError as e: return", "import datetime from typing import List from flask import Blueprint,", "product found with given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product():", "db.session.commit() if products_result == 1: return jsonify({\"status\": \"ok\", \"msg\": \"product", "None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date' in payload.keys()) else None), brand_id=int(payload['brand_id']),", "def create_or_get_categories(p: dict) -> List[Category]: \"\"\" Func to get existing", "db products_blueprint = Blueprint('products', __name__) def create_or_get_categories(p: dict) -> List[Category]:", "existing_category = Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return categories", "import Blueprint, jsonify, request, json from app.models.products import Product, Category,", "for cat in categories] [db.session.add(cat) for cat in categories] db.session.commit()", "\"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data = request.get_data().decode('utf-8')", "given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data = request.get_data().decode('utf-8')", "'%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id == payload['id']) if product: if", "\"msg\": \"categories number must be between 1 
and 5\"}', 400", "return jsonify({\"status\": \"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def update_product():", "payload.keys()) else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in", "in payload.keys(): product.update({'name': payload['name']}) if 'featured' in payload.keys(): product.update({'featured': bool(payload['featured'])})", "json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' if len(payload['categories']) < 1 or", "payload.keys()) else None)) except TypeError as e: return '{\"status\": \"error\",", "== int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result == 1: return jsonify({\"status\": \"ok\",", "if len(payload['categories']) < 1 or len(payload['categories']) > 5: return '{\"status\":", "datetime_format) if ('receipt_date' in payload.keys()) else None)) except TypeError as", "List from flask import Blueprint, jsonify, request, json from app.models.products", "\"msg\": \"no product found with given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE'])", "in payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj", "'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys(): product.update({'receipt_date':", "check values of fields\"}' except KeyError as e: return '{\"status\":", "get_products(): return jsonify({ 'results': [p.serialized for p in Product.query.all()] })", "else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in 
payload.keys())", "in recevied_categories: exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists: existing_category", "or create new otherwise :param p: payload of request :return:", "db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists: existing_category = Category.query.filter(Category.name == cat.name).all()[0]", "payload.keys(): product.update({'name': payload['name']}) if 'featured' in payload.keys(): product.update({'featured': bool(payload['featured'])}) if", "product_categories relations deleted\" % products_categories_result}) else: return jsonify({\"status\": \"warning\", \"msg\":", "db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product updated\"}) else: return '{\"status\":", "rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format)", "> 5: return '{\"status\": \"error\", \"msg\": \"categories number must be", "jsonify({ 'results': [p.serialized for p in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST'])", "jsonify({\"status\": \"ok\", \"msg\": \"product deleted, also %d product_categories relations deleted\"", "if 'featured' in payload.keys(): product.update({'featured': bool(payload['featured'])}) if 'rating' in payload.keys():", "categories = create_or_get_categories(payload) try: new_prod = Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if", "int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand'", "new otherwise :param p: payload of request :return: list of", "cat in categories] if 'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)})", "= 
Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False)", "if 'receipt_date' in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in", "== cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return categories @products_blueprint.route('/products', methods=['GET']) def", "from typing import List from flask import Blueprint, jsonify, request,", "categories] [db.session.add(cat) for cat in categories] db.session.commit() return jsonify({\"status\": \"ok\",", "List[Category]: \"\"\" Func to get existing categories objects or create", "8.0: product.featured = True if 'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])})", "app import db products_blueprint = Blueprint('products', __name__) def create_or_get_categories(p: dict)", "cat in categories] [db.session.add(cat) for cat in categories] db.session.commit() return", "= True if 'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date'", "for cat in categories] [db.session.add(cat) for cat in categories] if", "if new_prod.rating > 8.0: new_prod.featured = True [cat.products.append(new_prod) for cat", "expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date' in payload.keys()) else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']),", "categories] if 'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return", "p['categories']] categories = [] for cat in recevied_categories: exists =", "json from app.models.products import Product, Category, 
products_categories from app import", "__name__) def create_or_get_categories(p: dict) -> List[Category]: \"\"\" Func to get", "== int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if", "cat in p['categories']] categories = [] for cat in recevied_categories:", "'{\"status\": \"error\", \"msg\": \"no product found with given id\"}', 404", "must be between 1 and 5\"}', 400 categories = create_or_get_categories(payload)", "except KeyError as e: return '{\"status\": \"error\", \"msg\": \"field %s", "%s have not been found, but is required\"}' % str(e),", "TypeError as e: return '{\"status\": \"error\", \"msg\": \"TypeError occured: check", "product = Product.query.filter(Product.id == payload['id']) if product: if 'name' in", "\"msg\": \"product deleted, also %d product_categories relations deleted\" % products_categories_result})", "'categories' in payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False)", "if products_result == 1: return jsonify({\"status\": \"ok\", \"msg\": \"product deleted,", "products_result == 1: return jsonify({\"status\": \"ok\", \"msg\": \"product deleted, also", "400 categories = create_or_get_categories(payload) try: new_prod = Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured']", "= Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in payload.keys() else None),", "in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in payload.keys(): product.update({'brand':", "if ('expiration_date' in payload.keys()) else None), brand_id=int(payload['brand_id']), 
items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format)", "import db products_blueprint = Blueprint('products', __name__) def create_or_get_categories(p: dict) ->", "'rating' in payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating > 8.0: product.featured", "methods=['DELETE']) def delete_product(): data = request.get_data().decode('utf-8') p = json.loads(data) products_result", "bool(payload['featured'])}) if 'rating' in payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating >", "is required\"}' % str(e), 400 if new_prod.rating > 8.0: new_prod.featured", "def get_products(): return jsonify({ 'results': [p.serialized for p in Product.query.all()]", "get existing categories objects or create new otherwise :param p:", "for p in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def create_product(): data", "= request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' product", "datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product updated\"}) else: return", "\"categories number must be between 1 and 5\"}', 400 categories", "int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result", "categories.append(existing_category) else: categories.append(cat) return categories @products_blueprint.route('/products', methods=['GET']) def get_products(): return", "return jsonify({\"status\": \"ok\", \"msg\": \"product deleted, also %d product_categories relations", "Product.query.filter(Product.id == payload['id']) if product: if 'name' in payload.keys(): product.update({'name':", "return jsonify({\"status\": \"ok\", \"msg\": \"product updated\"}) else: 
return '{\"status\": \"error\",", "categories] [db.session.add(cat) for cat in categories] if 'expiration_date' in payload.keys():", "as e: return '{\"status\": \"error\", \"msg\": \"TypeError occured: check values", "int(payload['brand'])}) if 'categories' in payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id", "'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\",", "payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' if len(payload['categories']) <", "of request :return: list of categories \"\"\" recevied_categories: List[Category] =", "categories @products_blueprint.route('/products', methods=['GET']) def get_products(): return jsonify({ 'results': [p.serialized for", "if 'name' in payload.keys(): product.update({'name': payload['name']}) if 'featured' in payload.keys():", "or len(payload['categories']) > 5: return '{\"status\": \"error\", \"msg\": \"categories number", "'{\"status\": \"error\", \"msg\": \"categories number must be between 1 and", "not been found, but is required\"}' % str(e), 400 if", "% str(e), 400 if new_prod.rating > 8.0: new_prod.featured = True", "create new otherwise :param p: payload of request :return: list", "datetime_format = '%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id == payload['id']) if", "number must be between 1 and 5\"}', 400 categories =", "cat in recevied_categories: exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists:", "to get existing categories objects or create new otherwise :param", "= request.get_data().decode('utf-8') p = json.loads(data) products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False)", "else None), expiration_date=(datetime.strptime(payload['expiration_date'], 
datetime_format) if ('expiration_date' in payload.keys()) else None),", "jsonify({\"status\": \"warning\", \"msg\": \"%d products deleted, also %d product_categories relations", "payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in payload.keys(): product.update({'brand': int(payload['brand'])})", "from datetime import datetime from typing import List from flask", "if 'categories' in payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id ==", "in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date' in payload.keys())", "Func to get existing categories objects or create new otherwise", "@products_blueprint.route('/create_product', methods=['POST']) def create_product(): data = request.get_data().decode('utf-8') payload = json.loads(data)", "True [cat.products.append(new_prod) for cat in categories] [db.session.add(cat) for cat in", "of fields\"}' except KeyError as e: return '{\"status\": \"error\", \"msg\":", "= create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj)", "None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in payload.keys()) else", "request.get_data().decode('utf-8') p = json.loads(data) products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result", "datetime from typing import List from flask import Blueprint, jsonify,", "= create_or_get_categories(payload) try: new_prod = Product(name=payload['name'], rating=float(payload['rating']), 
featured=bool(payload['featured'] if 'featured'", "1: return jsonify({\"status\": \"ok\", \"msg\": \"product deleted, also %d product_categories", "if exists: existing_category = Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category) else: categories.append(cat)", "create_or_get_categories(payload) try: new_prod = Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in", "\"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data =", "payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)})", "exists: existing_category = Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return", "create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj) for", "new_prod.rating > 8.0: new_prod.featured = True [cat.products.append(new_prod) for cat in", "= '%Y-%m-%d %H:%M:%S' product = Product.query.filter(Product.id == payload['id']) if product:", "also %d product_categories relations deleted\" % products_categories_result}) else: return jsonify({\"status\":", "products deleted, also %d product_categories relations deleted\" % (products_result, products_categories_result)})", "product.all()[0] [cat.products.append(product_obj) for cat in categories] [db.session.add(cat) for cat in", "deleted, also %d product_categories relations deleted\" % products_categories_result}) else: return", "product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys(): product.update({'receipt_date': 
datetime.strptime(payload['receipt_date'], datetime_format)}) if", "values of fields\"}' except KeyError as e: return '{\"status\": \"error\",", "= [] for cat in recevied_categories: exists = db.session.query(db.exists().where(Category.name ==", "from app import db products_blueprint = Blueprint('products', __name__) def create_or_get_categories(p:", "between 1 and 5\"}', 400 categories = create_or_get_categories(payload) try: new_prod", "db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj) for cat", "categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0]", "else: categories.append(cat) return categories @products_blueprint.route('/products', methods=['GET']) def get_products(): return jsonify({", "objects or create new otherwise :param p: payload of request", "in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def create_product(): data = request.get_data().decode('utf-8')", "return '{\"status\": \"error\", \"msg\": \"TypeError occured: check values of fields\"}'", "be between 1 and 5\"}', 400 categories = create_or_get_categories(payload) try:", "1 and 5\"}', 400 categories = create_or_get_categories(payload) try: new_prod =", "required\"}' % str(e), 400 if new_prod.rating > 8.0: new_prod.featured =", "payload.keys(): product.update({'featured': bool(payload['featured'])}) if 'rating' in payload.keys(): product.update({'rating': float(payload['rating'])}) if", "for cat in categories] if 'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'],", "Category, products_categories from app import db products_blueprint = Blueprint('products', __name__)", "\"product 
deleted, also %d product_categories relations deleted\" % products_categories_result}) else:", "datetime_format) if ('expiration_date' in payload.keys()) else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'],", "[Category(name=cat) for cat in p['categories']] categories = [] for cat", "in payload.keys()) else None)) except TypeError as e: return '{\"status\":", "create_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d", "categories] db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT'])", "> 8.0: product.featured = True if 'items_in_stock' in payload.keys(): product.update({'items_in_stock':", "\"ok\", \"msg\": \"product deleted, also %d product_categories relations deleted\" %", "\"error\", \"msg\": \"TypeError occured: check values of fields\"}' except KeyError", "8.0: new_prod.featured = True [cat.products.append(new_prod) for cat in categories] [db.session.add(cat)", "> 8.0: new_prod.featured = True [cat.products.append(new_prod) for cat in categories]", "brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in payload.keys()) else None))", "products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result == 1: return jsonify({\"status\":", "Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def create_product(): data = request.get_data().decode('utf-8') payload", "= json.loads(data) products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter(", "id\"}', 404 
@products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data = request.get_data().decode('utf-8') p", "json.loads(data) products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id", "return '{\"status\": \"error\", \"msg\": \"categories number must be between 1", "if 'rating' in payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating > 8.0:", "products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result ==", "of categories \"\"\" recevied_categories: List[Category] = [Category(name=cat) for cat in", "create_or_get_categories(p: dict) -> List[Category]: \"\"\" Func to get existing categories", "payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product", "in p['categories']] categories = [] for cat in recevied_categories: exists", "payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating > 8.0: product.featured = True", "product: if 'name' in payload.keys(): product.update({'name': payload['name']}) if 'featured' in", "existing categories objects or create new otherwise :param p: payload", "== payload['id']) if product: if 'name' in payload.keys(): product.update({'name': payload['name']})", "exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists: existing_category = Category.query.filter(Category.name", "receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in payload.keys()) else None)) except TypeError", "\"error\", \"msg\": \"categories number must be between 1 and 5\"}',", "if 
'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys():", "'featured' in payload.keys(): product.update({'featured': bool(payload['featured'])}) if 'rating' in payload.keys(): product.update({'rating':", "@products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data = request.get_data().decode('utf-8') payload = json.loads(data)", "payload['id']) if product: if 'name' in payload.keys(): product.update({'name': payload['name']}) if", "request, json from app.models.products import Product, Category, products_categories from app", "\"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data = request.get_data().decode('utf-8') payload", "else None)) except TypeError as e: return '{\"status\": \"error\", \"msg\":", "product.featured = True if 'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if", "< 1 or len(payload['categories']) > 5: return '{\"status\": \"error\", \"msg\":", "request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' product =", "'name' in payload.keys(): product.update({'name': payload['name']}) if 'featured' in payload.keys(): product.update({'featured':", "\"\"\" recevied_categories: List[Category] = [Category(name=cat) for cat in p['categories']] categories", "return categories @products_blueprint.route('/products', methods=['GET']) def get_products(): return jsonify({ 'results': [p.serialized", "@products_blueprint.route('/products', methods=['GET']) def get_products(): return jsonify({ 'results': [p.serialized for p", "= json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' if len(payload['categories']) < 1", "have not been found, but is required\"}' % str(e), 400", "product.update({'name': payload['name']}) if 'featured' in payload.keys(): product.update({'featured': 
bool(payload['featured'])}) if 'rating'", "items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date' in payload.keys()) else None)) except", "products_blueprint = Blueprint('products', __name__) def create_or_get_categories(p: dict) -> List[Category]: \"\"\"", "product.update({'rating': float(payload['rating'])}) if product.rating > 8.0: product.featured = True if", "\"error\", \"msg\": \"field %s have not been found, but is", "\"TypeError occured: check values of fields\"}' except KeyError as e:", "Product, Category, products_categories from app import db products_blueprint = Blueprint('products',", "categories \"\"\" recevied_categories: List[Category] = [Category(name=cat) for cat in p['categories']]", "return jsonify({ 'results': [p.serialized for p in Product.query.all()] }) @products_blueprint.route('/create_product',", "def delete_product(): data = request.get_data().decode('utf-8') p = json.loads(data) products_result =", "== 1: return jsonify({\"status\": \"ok\", \"msg\": \"product deleted, also %d", "request :return: list of categories \"\"\" recevied_categories: List[Category] = [Category(name=cat)", "recevied_categories: List[Category] = [Category(name=cat) for cat in p['categories']] categories =", "\"msg\": \"product updated\"}) else: return '{\"status\": \"error\", \"msg\": \"no product", "typing import List from flask import Blueprint, jsonify, request, json", "'{\"status\": \"error\", \"msg\": \"field %s have not been found, but", "new_prod.featured = True [cat.products.append(new_prod) for cat in categories] [db.session.add(cat) for", "in payload.keys(): product.update({'rating': float(payload['rating'])}) if product.rating > 8.0: product.featured =", "from flask import Blueprint, jsonify, request, json from app.models.products import", "p: payload of request :return: list of categories \"\"\" recevied_categories:", "= db.session.query(products_categories).filter( 
products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result == 1:", "= product.all()[0] [cat.products.append(product_obj) for cat in categories] [db.session.add(cat) for cat", "been found, but is required\"}' % str(e), 400 if new_prod.rating", "product.rating > 8.0: product.featured = True if 'items_in_stock' in payload.keys():", "list of categories \"\"\" recevied_categories: List[Category] = [Category(name=cat) for cat", "%H:%M:%S' product = Product.query.filter(Product.id == payload['id']) if product: if 'name'", "datetime_format = '%Y-%m-%d %H:%M:%S' if len(payload['categories']) < 1 or len(payload['categories'])", "for cat in categories] db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product", "\"warning\", \"msg\": \"%d products deleted, also %d product_categories relations deleted\"", "found, but is required\"}' % str(e), 400 if new_prod.rating >", "if 'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\":", "methods=['GET']) def get_products(): return jsonify({ 'results': [p.serialized for p in", "= request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S' if", "data = request.get_data().decode('utf-8') p = json.loads(data) products_result = Product.query.filter(Product.id ==", "relations deleted\" % products_categories_result}) else: return jsonify({\"status\": \"warning\", \"msg\": \"%d", "new_prod = Product(name=payload['name'], rating=float(payload['rating']), featured=bool(payload['featured'] if 'featured' in payload.keys() else", "product.update({'brand': int(payload['brand'])}) if 'categories' in payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter(", "= [Category(name=cat) for cat in p['categories']] categories = [] for", "\"\"\" Func to get 
existing categories objects or create new", "return '{\"status\": \"error\", \"msg\": \"field %s have not been found,", "if product: if 'name' in payload.keys(): product.update({'name': payload['name']}) if 'featured'", "else: return '{\"status\": \"error\", \"msg\": \"no product found with given", "\"product updated\"}) else: return '{\"status\": \"error\", \"msg\": \"no product found", "for cat in p['categories']] categories = [] for cat in", "in payload.keys()) else None), brand_id=int(payload['brand_id']), items_in_stock=int(payload['items_in_stock']), receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format) if ('receipt_date'", "deleted\" % products_categories_result}) else: return jsonify({\"status\": \"warning\", \"msg\": \"%d products", ":return: list of categories \"\"\" recevied_categories: List[Category] = [Category(name=cat) for", "\"field %s have not been found, but is required\"}' %", "import List from flask import Blueprint, jsonify, request, json from", "[db.session.add(cat) for cat in categories] db.session.commit() return jsonify({\"status\": \"ok\", \"msg\":", "categories objects or create new otherwise :param p: payload of", "if 'brand' in payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories' in payload.keys():", "updated\"}) else: return '{\"status\": \"error\", \"msg\": \"no product found with", "products_categories from app import db products_blueprint = Blueprint('products', __name__) def", "with given id\"}', 404 @products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data =", "payload.keys(): categories = create_or_get_categories(payload) db.session.query(products_categories).filter( products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False) product_obj =", "delete_product(): data = request.get_data().decode('utf-8') p = json.loads(data) products_result = Product.query.filter(Product.id", "db.session.query(products_categories).filter( 
products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result == 1: return", "%d product_categories relations deleted\" % products_categories_result}) else: return jsonify({\"status\": \"warning\",", "cat in categories] db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product received\"})", "int(p['id'])).delete(synchronize_session=False) db.session.commit() if products_result == 1: return jsonify({\"status\": \"ok\", \"msg\":", "if ('receipt_date' in payload.keys()) else None)) except TypeError as e:", "5\"}', 400 categories = create_or_get_categories(payload) try: new_prod = Product(name=payload['name'], rating=float(payload['rating']),", "}) @products_blueprint.route('/create_product', methods=['POST']) def create_product(): data = request.get_data().decode('utf-8') payload =", "db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def", "payload of request :return: list of categories \"\"\" recevied_categories: List[Category]", "in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\", \"msg\":", "== int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj) for cat in categories]", "in categories] [db.session.add(cat) for cat in categories] db.session.commit() return jsonify({\"status\":", "datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit() return jsonify({\"status\": \"ok\", \"msg\": \"product updated\"}) else:", "-> List[Category]: \"\"\" Func to get existing categories objects or", "jsonify({\"status\": \"ok\", \"msg\": \"product updated\"}) else: return '{\"status\": \"error\", \"msg\":", "cat.name)).all()[0][0] if exists: existing_category = Category.query.filter(Category.name 
== cat.name).all()[0] categories.append(existing_category) else:", "datetime_format)}) if 'brand' in payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories' in", "[p.serialized for p in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def create_product():", "[db.session.add(cat) for cat in categories] if 'expiration_date' in payload.keys(): product.update({'expiration_date':", "\"%d products deleted, also %d product_categories relations deleted\" % (products_result,", "product_obj = product.all()[0] [cat.products.append(product_obj) for cat in categories] [db.session.add(cat) for", "jsonify({\"status\": \"ok\", \"msg\": \"product received\"}) @products_blueprint.route('/update_product', methods=['PUT']) def update_product(): data", "in categories] if 'expiration_date' in payload.keys(): product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)}) db.session.commit()", "None)) except TypeError as e: return '{\"status\": \"error\", \"msg\": \"TypeError", "int(payload['id'])).delete(synchronize_session=False) product_obj = product.all()[0] [cat.products.append(product_obj) for cat in categories] [db.session.add(cat)", "\"error\", \"msg\": \"no product found with given id\"}', 404 @products_blueprint.route('/delete_product',", "in payload.keys(): product.update({'featured': bool(payload['featured'])}) if 'rating' in payload.keys(): product.update({'rating': float(payload['rating'])})", "e: return '{\"status\": \"error\", \"msg\": \"field %s have not been", "\"ok\", \"msg\": \"product updated\"}) else: return '{\"status\": \"error\", \"msg\": \"no", "in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in payload.keys(): product.update({'receipt_date': datetime.strptime(payload['receipt_date'],", "products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False) 
products_categories_result = db.session.query(products_categories).filter( products_categories.c.product_id ==", "= Blueprint('products', __name__) def create_or_get_categories(p: dict) -> List[Category]: \"\"\" Func", "= '%Y-%m-%d %H:%M:%S' if len(payload['categories']) < 1 or len(payload['categories']) >", "True if 'items_in_stock' in payload.keys(): product.update({'items_in_stock': int(payload['items_in_stock'])}) if 'receipt_date' in", "= db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0] if exists: existing_category = Category.query.filter(Category.name ==", "\"msg\": \"%d products deleted, also %d product_categories relations deleted\" %", "flask import Blueprint, jsonify, request, json from app.models.products import Product,", "update_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d", "import Product, Category, products_categories from app import db products_blueprint =", "fields\"}' except KeyError as e: return '{\"status\": \"error\", \"msg\": \"field", "datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in payload.keys(): product.update({'brand': int(payload['brand'])}) if 'categories'", "'featured' in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date' in", "if 'featured' in payload.keys() else None), expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format) if ('expiration_date'", "data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format = '%Y-%m-%d %H:%M:%S'", "p in Product.query.all()] }) @products_blueprint.route('/create_product', methods=['POST']) def create_product(): data =", "methods=['POST']) def create_product(): data = request.get_data().decode('utf-8') payload = json.loads(data) datetime_format", "@products_blueprint.route('/delete_product', methods=['DELETE']) def delete_product(): data = 
request.get_data().decode('utf-8') p = json.loads(data)", "'%Y-%m-%d %H:%M:%S' if len(payload['categories']) < 1 or len(payload['categories']) > 5:", "app.models.products import Product, Category, products_categories from app import db products_blueprint", "otherwise :param p: payload of request :return: list of categories", "payload['name']}) if 'featured' in payload.keys(): product.update({'featured': bool(payload['featured'])}) if 'rating' in", "str(e), 400 if new_prod.rating > 8.0: new_prod.featured = True [cat.products.append(new_prod)", "List[Category] = [Category(name=cat) for cat in p['categories']] categories = []", "= Category.query.filter(Category.name == cat.name).all()[0] categories.append(existing_category) else: categories.append(cat) return categories @products_blueprint.route('/products',", "but is required\"}' % str(e), 400 if new_prod.rating > 8.0:", "product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)}) if 'brand' in payload.keys(): product.update({'brand': int(payload['brand'])}) if", "e: return '{\"status\": \"error\", \"msg\": \"TypeError occured: check values of" ]
[ "from util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures", "url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0] = True", "from util.config import URLSchemeAndHostname from util.config.validator import ValidatorContext from util.config.validators", "\"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator", "ValidatorContext from util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from", "test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0] =", "@pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\":", "BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\":", "validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request):", "ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\", }, },", "@urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0] = True return { \"status_code\":", "import BitbucketTriggerValidator from test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})),", "(ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], ) def 
test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator =", "= URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\":", "\"foo\"}})), ], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator() with", "pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url,", "{}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], ) def", "], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException):", "URLSchemeAndHostname from util.config.validator import ValidatorContext from util.config.validators import ConfigValidationException from", "test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app):", "[ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})),", "pytest from httmock import urlmatch, HTTMock from util.config import URLSchemeAndHostname", "True return { \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler):", "\"localhost:5000\") unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\":", "\"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", 
\"CONSUMER_SECRET\": \"bar\", }, }, url_scheme_and_hostname=url_scheme_and_hostname, )", "(ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], )", "{ \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\", }, }, url_scheme_and_hostname=url_scheme_and_hostname, ) validator.validate(unvalidated_config)", "request): url_hit[0] = True return { \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\",", "= ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\", },", "(ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config,", "util.config.validator import ValidatorContext from util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import", "test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\":", "util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures import", "app): validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit", "BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\")", "import URLSchemeAndHostname from util.config.validator import ValidatorContext from 
util.config.validators import ConfigValidationException", "HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config =", "from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\",", "util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\", [", "with HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config", "= [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0] = True return", "def handler(url, request): url_hit[0] = True return { \"status_code\": 200,", "import pytest from httmock import urlmatch, HTTMock from util.config import", "from httmock import urlmatch, HTTMock from util.config import URLSchemeAndHostname from", "\"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\", }, }, url_scheme_and_hostname=url_scheme_and_hostname, ) validator.validate(unvalidated_config) assert", "unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\",", "ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures import * @pytest.mark.parametrize(", "handler(url, request): url_hit[0] = True return { \"status_code\": 200, \"content\":", "{ \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\", \"CONSUMER_SECRET\": \"bar\", }, }, url_scheme_and_hostname=url_scheme_and_hostname,", "= True return { \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with", "[False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0] = True return {", 
"(ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ],", "import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator from test.fixtures import *", ") def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config)", "\"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\":", "{\"CONSUMER_KEY\": \"foo\"}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_SECRET\": \"foo\"}})), ], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):", "= BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit = [False]", "validator = BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config = ValidatorContext(", "URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": { \"CONSUMER_KEY\": \"foo\",", "def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def", "import urlmatch, HTTMock from util.config import URLSchemeAndHostname from util.config.validator import", "= BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") 
unvalidated_config = ValidatorContext( {", "import ValidatorContext from util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator", "from test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})),", "util.config import URLSchemeAndHostname from util.config.validator import ValidatorContext from util.config.validators import", "\"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator = BitbucketTriggerValidator()", "with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def", "{ \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator =", "{\"CONSUMER_SECRET\": \"foo\"}})), ], ) def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app): validator = BitbucketTriggerValidator()", "return { \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator", "HTTMock from util.config import URLSchemeAndHostname from util.config.validator import ValidatorContext from", "import * @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\":", "def test_validate_bitbucket_trigger(app): url_hit = [False] @urlmatch(netloc=r\"bitbucket.org\") def handler(url, request): url_hit[0]", "200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname", "* @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": 
{}})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\": {\"CONSUMER_KEY\": \"foo\"}})),", "urlmatch, HTTMock from util.config import URLSchemeAndHostname from util.config.validator import ValidatorContext", "httmock import urlmatch, HTTMock from util.config import URLSchemeAndHostname from util.config.validator", "\"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\",", "url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\") unvalidated_config = ValidatorContext( { \"BITBUCKET_TRIGGER_CONFIG\": {", "BitbucketTriggerValidator from test.fixtures import * @pytest.mark.parametrize( \"unvalidated_config\", [ (ValidatorContext({})), (ValidatorContext({\"BITBUCKET_TRIGGER_CONFIG\":", "\"foo\", \"CONSUMER_SECRET\": \"bar\", }, }, url_scheme_and_hostname=url_scheme_and_hostname, ) validator.validate(unvalidated_config) assert url_hit[0]", "validator = BitbucketTriggerValidator() with pytest.raises(ConfigValidationException): validator.validate(unvalidated_config) def test_validate_bitbucket_trigger(app): url_hit =", "from util.config.validator import ValidatorContext from util.config.validators import ConfigValidationException from util.config.validators.validate_bitbucket_trigger", "url_hit[0] = True return { \"status_code\": 200, \"content\": \"oauth_token=foo&oauth_token_secret=bar\", }", "} with HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname = URLSchemeAndHostname(\"http\", \"localhost:5000\")", "\"content\": \"oauth_token=foo&oauth_token_secret=bar\", } with HTTMock(handler): validator = BitbucketTriggerValidator() url_scheme_and_hostname =" ]
[ "probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 #", "xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return", "#plt.xlabel(r'Integration Interval $ \\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100)", "x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for", "r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]", "16:51:16 2019 @author: Silvan \"\"\" import numpy as np import", "import scipy import matplotlib.pyplot as plt k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0", "real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N):", "# #P=np.ones(N) # #for i in range(N): # print(i+1) #", "x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda", "\\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x))", "i1=scipy.integrate.quad(lambda x: 
np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3)", "range(N): # print(i+1) # P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta", "#plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k", "range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200)", "np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$')", "yb=-1.0 xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0)", "plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20)", "Silvan \"\"\" import numpy as np import scipy import matplotlib.pyplot", "#plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100) #xa=-d/2 #xb=d/2", "kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum Number of subdivisions", "return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum Number of subdivisions for", "print(i+1) # P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x}", "i2=scipy.integrate.quad(lambda x: 
np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k", "np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i", "return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range", "import numpy as np import scipy import matplotlib.pyplot as plt", "as plt k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0", "return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum", "s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best')", "##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show() #", "utf-8 -*- \"\"\" Created on Fri Mar 15 16:51:16 2019", "numpy as np import scipy import matplotlib.pyplot as plt k=1000", "scipy import matplotlib.pyplot as plt k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha))", "in range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda", "#P=np.ones(N) # #for i in range(N): # print(i+1) # P[i]=trans_amp(dx[i])", "i in range(N): # print(i+1) # 
P[i]=trans_amp(dx[i]) # # #plt.figure(1)", "K(R): L=1000 #Maximum Number of subdivisions for integral calculations eps=0.01", "#x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100) #xa=-d/2 #xb=d/2 #plt.figure(3)", "n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum Number", "Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show()", "for integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x:", "n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x):", "x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda", "def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum Number of", "calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda", "-*- coding: utf-8 -*- \"\"\" Created on Fri Mar 15", "range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x:", "x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] 
imag[i]=imag[i-1]+i1+i2", "# #for i in range(N): # print(i+1) # P[i]=trans_amp(dx[i]) #", "imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration", "in range(N): # print(i+1) # P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:]))", "# -*- coding: utf-8 -*- \"\"\" Created on Fri Mar", "def K(R): L=1000 #Maximum Number of subdivisions for integral calculations", "r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]", "k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N) #", "s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i", "##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100) #xa=-d/2", "<reponame>silkoch42/Geometric-Optics-from-QM # -*- coding: utf-8 -*- \"\"\" Created on Fri", "as np import scipy import matplotlib.pyplot as plt k=1000 n1=2.0", "subdivisions for integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda", "15 16:51:16 2019 @author: Silvan \"\"\" import numpy as np", "N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]", "Interval $ \\Delta 
x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) #", "plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N) # #for i", "np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return", "# #plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100) #xa=-d/2 #xb=d/2 #plt.figure(3) #plt.plot(d,kernel(xa,xb)**2)", "\"\"\" Created on Fri Mar 15 16:51:16 2019 @author: Silvan", "np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M)", "real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag", "#Maximum Number of subdivisions for integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True)", "np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N): r1=scipy.integrate.quad(lambda x:", "\"\"\" import numpy as np import scipy import matplotlib.pyplot as", "s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N) # #for", "#N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N) # #for i in range(N):", "def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R):", 
"imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]", "1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000 #Maximum Number of subdivisions for integral", "K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection", "#plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200)", "# P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik", "eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x:", "M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty')", "np import scipy import matplotlib.pyplot as plt k=1000 n1=2.0 n2=1.0", "k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def", "@author: Silvan \"\"\" import numpy as np import scipy import", "k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$')", "np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x:", "Fri Mar 15 16:51:16 2019 @author: Silvan \"\"\" import numpy", "i in range(1,N): r1=scipy.integrate.quad(lambda x: 
np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2", "$R$') plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show()", "#dx=np.linspace(0,10,N) # #P=np.ones(N) # #for i in range(N): # print(i+1)", "# #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude')", "plt.show() #N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N) # #for i in", "xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def", "of subdivisions for integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N)", "x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta x$')", "#plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100) #xa=-d/2 #xb=d/2 #plt.figure(3) #plt.plot(d,kernel(xa,xb)**2) #plt.show()", "# #dx=np.linspace(0,10,N) # #P=np.ones(N) # #for i in range(N): #", "plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N)", "#plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show() # #d=np.linspace(0,5,100)", "import matplotlib.pyplot as plt k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0", "s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb): return 
1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0) def K(R): L=1000", "real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i", "plt.ylabel('Detection probabilty') plt.legend(loc='best') plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20", "#for i in range(N): # print(i+1) # P[i]=trans_amp(dx[i]) # #", "#plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $", "# print(i+1) # P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta", "P[i]=trans_amp(dx[i]) # # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20)", "plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$') #plt.errorbar(x,K2/M,0.1*K2/M) plt.xlabel(r'Integration range $R$') plt.ylabel('Detection probabilty') plt.legend(loc='best')", "# # #plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition", "x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2) #plt.plot(x,s(x)) #plt.show()", "for i in range(1,N): r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]", "x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:]) plt.plot(x,K2/M,label=r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$')", 
"plt.text(2.4,0.2,r'$k=1000$') #plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N) #", "x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta", "beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def", "np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] real[i]=real[i-1]+r1+r2 i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x:", "on Fri Mar 15 16:51:16 2019 @author: Silvan \"\"\" import", "2019 @author: Silvan \"\"\" import numpy as np import scipy", "x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in range(1,N): r1=scipy.integrate.quad(lambda", "ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2) def kernel(xa,xb):", "integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N) imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]", "x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0] i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0] imag[i]=imag[i-1]+i1+i2 return np.sqrt(real**2+imag**2),x,real,imag K2,x,r,i=K(3) M=np.mean(K2[25:])", "Mar 15 16:51:16 2019 @author: Silvan \"\"\" import numpy as", "e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval $ \\Delta x$') ##plt.axis([0,10,0,1.1])", "-*- \"\"\" Created on Fri Mar 15 16:51:16 2019 @author:", "matplotlib.pyplot as plt k=1000 n1=2.0 n2=1.0 
alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha)", "#plt.text(1.1,0.5,r'$|\\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20) plt.savefig('refraction_v3',dpi=200) plt.show() #N=20 # #dx=np.linspace(0,10,N) # #P=np.ones(N)", "$ \\Delta x$') ##plt.axis([0,10,0,1.1]) #plt.legend(loc='best') ##plt.savefig('refraction',dpi=200) #plt.show() #x=np.linspace(-5,5,100) # #plt.figure(2)", "#plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration Interval", "coding: utf-8 -*- \"\"\" Created on Fri Mar 15 16:51:16", "plt k=1000 n1=2.0 n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta)", "n2=1.0 alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x): return", "Number of subdivisions for integral calculations eps=0.01 N=50 x,dx=np.linspace(0.01,R,N,retstep=True) real=np.empty(N)", "imag=np.empty(N) real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0] for i in", "Created on Fri Mar 15 16:51:16 2019 @author: Silvan \"\"\"", "L=1000 #Maximum Number of subdivisions for integral calculations eps=0.01 N=50", "alpha=np.pi/6.0 beta=np.arcsin(n2/n1*np.sin(alpha)) ya=1.0 xa=-ya*np.tan(alpha) yb=-1.0 xb=-yb*np.tan(beta) def s(x): return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2)", "#plt.figure(1) #plt.plot(dx,P/np.mean(P[20:])) #plt.text(4.0,0.5,r'$|\\int_{-\\Delta x}^{\\Delta x} e^{ik s(x)}dx$|',fontsize=20) #plt.ylabel('Transition Amplitude') #plt.xlabel(r'Integration" ]
[ "django.conf import settings from django.core.management.base import BaseCommand from elasticsearch import", "BaseCommand from elasticsearch import Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\"", "readthedocs elasticsearch index.\"\"\" from __future__ import absolute_import from django.conf import", "<reponame>italia/readthedocs.org<gh_stars>10-100 \"\"\"Remove the readthedocs elasticsearch index.\"\"\" from __future__ import absolute_import", "\"\"\"Clear elasticsearch index.\"\"\" def handle(self, *args, **options): \"\"\"handle command.\"\"\" e_s", "Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def handle(self, *args, **options):", "elasticsearch index.\"\"\" def handle(self, *args, **options): \"\"\"handle command.\"\"\" e_s =", "elasticsearch import Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def handle(self,", "import settings from django.core.management.base import BaseCommand from elasticsearch import Elasticsearch", "import Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def handle(self, *args,", "import absolute_import from django.conf import settings from django.core.management.base import BaseCommand", "from __future__ import absolute_import from django.conf import settings from django.core.management.base", "Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def handle(self, *args, **options): \"\"\"handle command.\"\"\"", "elasticsearch index.\"\"\" from __future__ import absolute_import from django.conf import settings", "import BaseCommand from elasticsearch import Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch", "absolute_import from django.conf import settings from django.core.management.base import BaseCommand from", "__future__ import absolute_import from django.conf import settings from django.core.management.base import", "settings from django.core.management.base import 
BaseCommand from elasticsearch import Elasticsearch class", "from elasticsearch import Elasticsearch class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def", "index.\"\"\" from __future__ import absolute_import from django.conf import settings from", "from django.conf import settings from django.core.management.base import BaseCommand from elasticsearch", "\"\"\"Remove the readthedocs elasticsearch index.\"\"\" from __future__ import absolute_import from", "the readthedocs elasticsearch index.\"\"\" from __future__ import absolute_import from django.conf", "from django.core.management.base import BaseCommand from elasticsearch import Elasticsearch class Command(BaseCommand):", "def handle(self, *args, **options): \"\"\"handle command.\"\"\" e_s = Elasticsearch(settings.ES_HOSTS) e_s.indices.delete(index='_all')", "class Command(BaseCommand): \"\"\"Clear elasticsearch index.\"\"\" def handle(self, *args, **options): \"\"\"handle", "index.\"\"\" def handle(self, *args, **options): \"\"\"handle command.\"\"\" e_s = Elasticsearch(settings.ES_HOSTS)", "django.core.management.base import BaseCommand from elasticsearch import Elasticsearch class Command(BaseCommand): \"\"\"Clear" ]
[ "itr # state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0,", "loaders[0] total_iters = config['num_epochs'] * len(loader) # Train for specified", "sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples']", "main(): # parse command line and run parser = utils.prepare_parser()", "shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare noise and", "if config['resume']: print('Skipping initialization for training resumption...') config['skip_init'] = True", "shouldn't matter much. G.train() D.train() if config['ema']: G_ema.train() x, y", "eval mode...') G.eval() train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,", "di / thoi gian da di) eta = ( total_iters", "timer = mmcv.Timer() timer.start() start_itr = state_dict['itr'] for epoch in", "fixed_z.sample_() fixed_y.sample_() # Loaders are loaded, prepare the training function", "for _ in range(state_dict['itr']): pbar.update() timer = mmcv.Timer() timer.start() start_itr", "datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong / (quang duong da di", "of number of D steps and accumulations) D_batch_size = (config['batch_size']", "# IMG_SIZE_2 = IMG_SIZE * 2 def run(config): # Update", "unofficial reimplementation of \"Large-Scale GAN Training for High Fidelity Natural", "all that needs to be passed # to the dataloader,", "'generative_dog_images') print('Experiment name is %s' % experiment_name) G = BigGAN.Generator(**config).to(device)", "of {}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device)", "start_time = time.perf_counter() loader = loaders[0] total_iters = config['num_epochs'] *", "G with decay of {}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init':", "High Fidelity Natural Image Synthesis,\" by <NAME>, <NAME>, and <NAME>", 
"print(G) print(D) print('Number of params in G: {} D: {}'.format(", "set to eval # For D, which typically doesn't have", "dataset import BigGAN import train_fns import utils from common import", "by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096). Let's go. \"\"\"", "to see individual sample evolution throghout training fixed_z, fixed_y =", "complete # a full D iteration (regardless of number of", "classes # and size of the images from the dataset,", "= train_fns.create_train_fn( G, D, GD, z_, y_, ema, state_dict, config)", "reimplementation of \"Large-Scale GAN Training for High Fidelity Natural Image", "# Prepare data; the Discriminator's batch size is all that", "evolution throghout training fixed_z, fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'],", "y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=True) pbar.update() # Increment", "total_iters, epoch) log += ', '.join(['%s : %+4.3f' % (key,", "dataset, passing in a pytorch object # for the activation", "as a string) config['resolution'] = IMG_SIZE config['n_classes'] = 1 config['G_activation']", "Seed RNG utils.seed_rng(config['seed']) # Prepare root folders if necessary utils.prepare_root(config)", "from the user-specified # configuration into the config-dict (e.g. 
inferring", "steps and accumulations) D_batch_size = (config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations'])", "batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare", "pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare noise and randomly", "specified as a string) config['resolution'] = IMG_SIZE config['n_classes'] = 1", "got set to eval # For D, which typically doesn't", "G: {} D: {}'.format( *[sum([p.data.nelement() for p in net.parameters()]) for", "(state_dict['itr'] % config['save_every']): if config['G_eval_mode']: # print('Switching G to eval", "IMG_SIZE_2 = IMG_SIZE * 2 def run(config): # Update the", "Prepare state dict, which holds things like epoch # and", "% experiment_name) G = BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) # if", "= (config['experiment_name'] if config['experiment_name'] else 'generative_dog_images') print('Experiment name is %s'", "config['resume']: print('Skipping initialization for training resumption...') config['skip_init'] = True config", "Note that at every loader iteration we pass in enough", "G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=False) if", "config['log_interval']): curr_time = timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang", "eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}] [{}] [{} /", "if config['ema'] else None) # Prepare data; the Discriminator's batch", "for training resumption...') config['skip_init'] = True config = utils.update_config_roots(config) device", "as G doesn't require dataloading. 
# Note that at every", "= 'cuda' # Seed RNG utils.seed_rng(config['seed']) # Prepare root folders", "config['load_weights'] else None, G_ema if config['ema'] else None) # Prepare", "p in net.parameters()]) for net in [G, D]])) # Prepare", "free speed torch.backends.cudnn.benchmark = True experiment_name = (config['experiment_name'] if config['experiment_name']", "{}'.format( *[sum([p.data.nelement() for p in net.parameters()]) for net in [G,", "print('Number of params in G: {} D: {}'.format( *[sum([p.data.nelement() for", "config['D_activation'] = utils.activation_dict[config['D_nl']] # By default, skip init if resuming", "# Prepare root folders if necessary utils.prepare_root(config) # Setup cudnn.benchmark", "import datetime import time import torch import dataset import BigGAN", "default, skip init if resuming training. if config['resume']: print('Skipping initialization", "specified interval if not (state_dict['itr'] % config['sample_every']): if config['G_eval_mode']: #", "of \"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,\"", "in case they got set to eval # For D,", "# a full D iteration (regardless of number of D", "in a pytorch object # for the activation specified as", "range(state_dict['itr']): pbar.update() timer = mmcv.Timer() timer.start() start_itr = state_dict['itr'] for", "for High Fidelity Natural Image Synthesis,\" by <NAME>, <NAME>, and", "% config['log_interval']): curr_time = timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') #", "every loader iteration we pass in enough data to complete", "y = data['img'], data['label'] # Increment the iteration counter state_dict['itr']", "the user-specified # configuration into the config-dict (e.g. 
inferring the", "in range(state_dict['epoch'], config['num_epochs']): for i, data in enumerate(loader): x, y", "fixed_y, state_dict, config, experiment_name, save_weight=True) pbar.update() # Increment epoch counter", "= utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__ ==", "# If loading from a pre-trained model, load weights if", "# Prepare a fixed z & y to see individual", "not (state_dict['itr'] % config['save_every']): if config['G_eval_mode']: # print('Switching G to", "is all that needs to be passed # to the", "at specified interval if not (state_dict['itr'] % config['sample_every']): if config['G_eval_mode']:", "fixed z & y to see individual sample evolution throghout", "by <NAME> and <NAME> This code is an unofficial reimplementation", "epoch in range(state_dict['epoch'], config['num_epochs']): for i, data in enumerate(loader): x,", "if config['resume']: print('Loading weights...') utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name, config['load_weights']", "BigGAN import train_fns import utils from common import * #", "into the config-dict (e.g. 
inferring the number of classes #", "= utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] # By default, skip init", "net in [G, D]])) # Prepare state dict, which holds", "G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples'] z_, y_ =", "D, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=True)", "max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples'] z_, y_ = utils.prepare_z_y( num_samples,", "# and itr # state_dict = {'itr': 0, 'epoch': 0,", "sure G and D are in training mode, just in", "for different batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size'])", "from a pre-trained model, load weights if config['resume']: print('Loading weights...')", "and itr # state_dict = {'itr': 0, 'epoch': 0, 'save_num':", "Prepare noise and randomly sampled label arrays # Allow for", "64 # IMG_SIZE_2 = IMG_SIZE * 2 def run(config): #", "config, experiment_name, save_weight=True) pbar.update() # Increment epoch counter at end", "{}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device) G_ema", "0, 'epoch': 0, 'save_num': 0, 'config': config} # If loading", "ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start']) else: G_ema, ema =", "training fixed_z, fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])", "loaded, prepare the training function train = train_fns.create_train_fn( G, D,", "# Make sure G and D are in training mode,", "batch size is all that needs to be passed #", "see individual sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y(", "thoi gian da di) eta = ( total_iters - state_dict['itr'])", "\"\"\" BigGAN: The Authorized Unofficial PyTorch release Code by <NAME>", "range(state_dict['epoch'], 
config['num_epochs']): for i, data in enumerate(loader): x, y =", "this shouldn't matter much. G.train() D.train() if config['ema']: G_ema.train() x,", "if necessary utils.prepare_root(config) # Setup cudnn.benchmark for free speed torch.backends.cudnn.benchmark", "that at every loader iteration we pass in enough data", "G = BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) # if config['parallel']: G", "# Increment epoch counter at end of epoch state_dict['epoch'] +=", "'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema) ema = utils.ema(G, G_ema, config['ema_decay'],", "decay of {}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim':", "and run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config)", "epoch # and itr # state_dict = {'itr': 0, 'epoch':", "i, data in enumerate(loader): x, y = data['img'], data['label'] #", "experiment_name = (config['experiment_name'] if config['experiment_name'] else 'generative_dog_images') print('Experiment name is", "((state_dict['itr']-start_itr) / (curr_time+1)) eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}]", "of epoch state_dict['epoch'] += 1 def main(): # parse command", "/ (curr_time+1)) eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}] [{}]", "epoch state_dict['epoch'] += 1 def main(): # parse command line", "ema = None, None GD = BigGAN.G_D(G, D) print(G) print(D)", "# and size of the images from the dataset, passing", "device=device, fp16=config['G_fp16']) # Prepare a fixed z & y to", "# For D, which typically doesn't have BN, this shouldn't", "utils.update_config_roots(config) device = 'cuda' # Seed RNG utils.seed_rng(config['seed']) # Prepare", "experiment_name) G = BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) # if config['parallel']:", "specified number of epochs, although we mostly track 
G iterations.", "if resuming training. if config['resume']: print('Skipping initialization for training resumption...')", "'save_num': 0, 'config': config} # If loading from a pre-trained", "# state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config':", "// ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log =", "pbar = tqdm(total=total_iters) for _ in range(state_dict['itr']): pbar.update() timer =", "time import torch import dataset import BigGAN import train_fns import", "# Prepare state dict, which holds things like epoch #", "total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str = datetime.datetime.fromtimestamp(", "that needs to be passed # to the dataloader, as", "datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}] [{}] [{} / {}] Ep", "utils.ema(G, G_ema, config['ema_decay'], config['ema_start']) else: G_ema, ema = None, None", "iteration counter state_dict['itr'] += 1 # Make sure G and", "= utils.activation_dict[config['D_nl']] # By default, skip init if resuming training.", "and D are in training mode, just in case they", "different batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples", "config['resume']: print('Loading weights...') utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name, config['load_weights'] if", "metrics = train(x, y) if not (state_dict['itr'] % config['log_interval']): curr_time", "# By default, skip init if resuming training. if config['resume']:", "<NAME>, <NAME>, and <NAME> (arXiv 1809.11096). Let's go. 
\"\"\" import", "to add settings derived from the user-specified # configuration into", "(config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'],", "D = BigGAN.Discriminator(**config).to(device) # if config['parallel']: G = nn.DataParallel(G) D", "mode...') G.eval() train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y, state_dict,", "a fixed z & y to see individual sample evolution", "at epoch %d...' % state_dict['epoch']) start_time = time.perf_counter() loader =", "utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__ == '__main__':", "z_, y_ = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) #", "None) # Prepare data; the Discriminator's batch size is all", "things like epoch # and itr # state_dict = {'itr':", "G.eval() train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config,", "go. 
\"\"\" import datetime import time import torch import dataset", "# parse command line and run parser = utils.prepare_parser() config", "the dataset, passing in a pytorch object # for the", "experiment_name, save_weight=True) pbar.update() # Increment epoch counter at end of", "# Loaders are loaded, prepare the training function train =", "config['num_epochs']): for i, data in enumerate(loader): x, y = data['img'],", "EMA for G with decay of {}'.format( config['ema_decay'])) G_ema =", "num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare noise", "print('Switching G to eval mode...') G.eval() train_fns.save_and_sample(G, D, G_ema, z_,", "G to eval mode...') G.eval() train_fns.save_and_sample(G, D, G_ema, z_, y_,", "line and run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config)", "state dict, which holds things like epoch # and itr", "state_dict, config) print('Beginning training at epoch %d...' 
% state_dict['epoch']) start_time", "'skip_init': True, 'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema) ema = utils.ema(G,", "* 2 def run(config): # Update the config dict as", "Setup cudnn.benchmark for free speed torch.backends.cudnn.benchmark = True experiment_name =", "x, y = x.to(device), y.to(device) metrics = train(x, y) if", "config['n_classes'] = 1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] #", "fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders are loaded, prepare the training", "* len(loader) # Train for specified number of epochs, although", "nn.DataParallel(G_ema) ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start']) else: G_ema, ema", "state_dict, config['weights_root'], experiment_name, config['load_weights'] if config['load_weights'] else None, G_ema if", "<NAME> (arXiv 1809.11096). Let's go. \"\"\" import datetime import time", "config['num_fixed_samples'] z_, y_ = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])", "have BN, this shouldn't matter much. G.train() D.train() if config['ema']:", "user-specified # configuration into the config-dict (e.g. 
inferring the number", "train_fns import utils from common import * # IMG_SIZE =", "1 def main(): # parse command line and run parser", "a pre-trained model, load weights if config['resume']: print('Loading weights...') utils.load_weights(G,", "iteration we pass in enough data to complete # a", "fixed_z, fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_()", "we pass in enough data to complete # a full", "\"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,\" by", "utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders", "drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare noise and randomly sampled", "the activation specified as a string) config['resolution'] = IMG_SIZE config['n_classes']", "(state_dict['itr'] % config['sample_every']): if config['G_eval_mode']: # print('Switching G to eval", "# print(log) # Save weights and copies as configured at", "# Setup cudnn.benchmark for free speed torch.backends.cudnn.benchmark = True experiment_name", "function train = train_fns.create_train_fn( G, D, GD, z_, y_, ema,", "z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=False) if not", "data['img'], data['label'] # Increment the iteration counter state_dict['itr'] += 1", "for p in net.parameters()]) for net in [G, D]])) #", "G.train() D.train() if config['ema']: G_ema.train() x, y = x.to(device), y.to(device)", "D_batch_size = (config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations']) loaders = dataset.get_data_loaders(", "config, experiment_name, save_weight=False) if not (state_dict['itr'] % config['save_every']): if config['G_eval_mode']:", "Prepare data; the Discriminator's batch size is all that needs", "passed # to the dataloader, as G doesn't require dataloading.", "(arXiv 1809.11096). 
Let's go. \"\"\" import datetime import time import", "enumerate(loader): x, y = data['img'], data['label'] # Increment the iteration", "state_dict['itr'] += 1 # Make sure G and D are", "not (state_dict['itr'] % config['sample_every']): if config['G_eval_mode']: # print('Switching G to", "nn.DataParallel(D) # If using EMA, prepare it if config['ema']: print('Preparing", "state_dict['itr'] for epoch in range(state_dict['epoch'], config['num_epochs']): for i, data in", "Image Synthesis,\" by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096). Let's", "pytorch object # for the activation specified as a string)", "= mmcv.Timer() timer.start() start_itr = state_dict['itr'] for epoch in range(state_dict['epoch'],", "IMG_SIZE config['n_classes'] = 1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']]", "# Prepare noise and randomly sampled label arrays # Allow", "needs to be passed # to the dataloader, as G", "arrays # Allow for different batch sizes in G G_batch_size", "total_iters = config['num_epochs'] * len(loader) # Train for specified number", "fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=True) pbar.update() # Increment epoch", "case they got set to eval # For D, which", "y to see individual sample evolution throghout training fixed_z, fixed_y", "If using EMA, prepare it if config['ema']: print('Preparing EMA for", "with decay of {}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init': True,", "<NAME> This code is an unofficial reimplementation of \"Large-Scale GAN", "PyTorch release Code by <NAME> and <NAME> This code is", "import torch import dataset import BigGAN import train_fns import utils", "state_dict['epoch']) start_time = time.perf_counter() loader = loaders[0] total_iters = config['num_epochs']", "torch import dataset import BigGAN import train_fns import utils from", "G_ema = nn.DataParallel(G_ema) ema = utils.ema(G, G_ema, 
config['ema_decay'], config['ema_start']) else:", "label arrays # Allow for different batch sizes in G", "= 64 # IMG_SIZE_2 = IMG_SIZE * 2 def run(config):", "batch sizes in G G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples =", "config['save_every']): if config['G_eval_mode']: # print('Switching G to eval mode...') G.eval()", "of epochs, although we mostly track G iterations. pbar =", "matter much. G.train() D.train() if config['ema']: G_ema.train() x, y =", "config['ema']: G_ema.train() x, y = x.to(device), y.to(device) metrics = train(x,", "Training for High Fidelity Natural Image Synthesis,\" by <NAME>, <NAME>,", "= \"[{}] [{}] [{} / {}] Ep {}, \".format( curr_time_str,", "if not (state_dict['itr'] % config['sample_every']): if config['G_eval_mode']: # print('Switching G", "throghout training fixed_z, fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device,", "load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) # Prepare noise and randomly sampled label", "print('Loading weights...') utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name, config['load_weights'] if config['load_weights']", "dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out']", "BigGAN: The Authorized Unofficial PyTorch release Code by <NAME> and", "data in enumerate(loader): x, y = data['img'], data['label'] # Increment", "parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if __name__", "dataloading. # Note that at every loader iteration we pass", ") # Prepare noise and randomly sampled label arrays #", "for the activation specified as a string) config['resolution'] = IMG_SIZE", "# configuration into the config-dict (e.g. 
inferring the number of", "end of epoch state_dict['epoch'] += 1 def main(): # parse", "necessary utils.prepare_root(config) # Setup cudnn.benchmark for free speed torch.backends.cudnn.benchmark =", "1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] # By default,", "sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z,", "None GD = BigGAN.G_D(G, D) print(G) print(D) print('Number of params", "typically doesn't have BN, this shouldn't matter much. G.train() D.train()", "a pytorch object # for the activation specified as a", "len(loader) # Train for specified number of epochs, although we", "D: {}'.format( *[sum([p.data.nelement() for p in net.parameters()]) for net in", "curr_time_str, eta_str, state_dict['itr'], total_iters, epoch) log += ', '.join(['%s :", "(regardless of number of D steps and accumulations) D_batch_size =", "Loaders are loaded, prepare the training function train = train_fns.create_train_fn(", "at end of epoch state_dict['epoch'] += 1 def main(): #", "G_ema.train() x, y = x.to(device), y.to(device) metrics = train(x, y)", "%s' % experiment_name) G = BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) #", "= BigGAN.Discriminator(**config).to(device) # if config['parallel']: G = nn.DataParallel(G) D =", "for key in metrics]) pbar.set_description(log) # print(log) # Save weights", "config dict as necessary # This is for convenience, to", "in enough data to complete # a full D iteration", "= loaders[0] total_iters = config['num_epochs'] * len(loader) # Train for", "G_ema if config['ema'] else None) # Prepare data; the Discriminator's", "Save weights and copies as configured at specified interval if", "G doesn't require dataloading. 
# Note that at every loader", "experiment_name, save_weight=False) if not (state_dict['itr'] % config['save_every']): if config['G_eval_mode']: #", "Ep {}, \".format( curr_time_str, eta_str, state_dict['itr'], total_iters, epoch) log +=", "import utils from common import * # IMG_SIZE = 64", "print('Beginning training at epoch %d...' % state_dict['epoch']) start_time = time.perf_counter()", "track G iterations. pbar = tqdm(total=total_iters) for _ in range(state_dict['itr']):", "1 # Make sure G and D are in training", "config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders are loaded, prepare", "run parser = utils.prepare_parser() config = vars(parser.parse_args()) print(config) run(config) if", "passing in a pytorch object # for the activation specified", "utils.prepare_root(config) # Setup cudnn.benchmark for free speed torch.backends.cudnn.benchmark = True", "epochs, although we mostly track G iterations. pbar = tqdm(total=total_iters)", "loader iteration we pass in enough data to complete #", "the Discriminator's batch size is all that needs to be", "to complete # a full D iteration (regardless of number", "to eval mode...') G.eval() train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z,", "holds things like epoch # and itr # state_dict =", "mask_out=config['mask_out'] ) # Prepare noise and randomly sampled label arrays", "z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=True) pbar.update() #", "= BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema) ema", "dataloader, as G doesn't require dataloading. 
# Note that at", "epoch) log += ', '.join(['%s : %+4.3f' % (key, metrics[key])", "D, state_dict, config['weights_root'], experiment_name, config['load_weights'] if config['load_weights'] else None, G_ema", "Allow for different batch sizes in G G_batch_size = max(config['G_batch_size'],", "G, D, GD, z_, y_, ema, state_dict, config) print('Beginning training", "metrics[key]) for key in metrics]) pbar.set_description(log) # print(log) # Save", "True, 'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema) ema = utils.ema(G, G_ema,", "import train_fns import utils from common import * # IMG_SIZE", "just in case they got set to eval # For", "quang duong / (quang duong da di / thoi gian", "loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True,", "name is %s' % experiment_name) G = BigGAN.Generator(**config).to(device) D =", "= (config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'],", "log = \"[{}] [{}] [{} / {}] Ep {}, \".format(", "0, 'config': config} # If loading from a pre-trained model,", "fixed_y.sample_() # Loaders are loaded, prepare the training function train", "# for the activation specified as a string) config['resolution'] =", "import dataset import BigGAN import train_fns import utils from common", "at every loader iteration we pass in enough data to", "Code by <NAME> and <NAME> This code is an unofficial", "0, 'save_num': 0, 'config': config} # If loading from a", "= data['img'], data['label'] # Increment the iteration counter state_dict['itr'] +=", "1809.11096). Let's go. 
\"\"\" import datetime import time import torch", "and size of the images from the dataset, passing in", "z & y to see individual sample evolution throghout training", "the dataloader, as G doesn't require dataloading. # Note that", "cudnn.benchmark for free speed torch.backends.cudnn.benchmark = True experiment_name = (config['experiment_name']", "in metrics]) pbar.set_description(log) # print(log) # Save weights and copies", "If loading from a pre-trained model, load weights if config['resume']:", "<NAME> and <NAME> This code is an unofficial reimplementation of", "and copies as configured at specified interval if not (state_dict['itr']", "loader = loaders[0] total_iters = config['num_epochs'] * len(loader) # Train", "doesn't have BN, this shouldn't matter much. G.train() D.train() if", "D]])) # Prepare state dict, which holds things like epoch", "metrics]) pbar.set_description(log) # print(log) # Save weights and copies as", "import time import torch import dataset import BigGAN import train_fns", "config['ema_decay'])) G_ema = BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device) G_ema =", "= max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples'] z_, y_ = utils.prepare_z_y(", "Make sure G and D are in training mode, just", "= nn.DataParallel(D) # If using EMA, prepare it if config['ema']:", "config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z & y", "parse command line and run parser = utils.prepare_parser() config =", "% config['sample_every']): if config['G_eval_mode']: # print('Switching G to eval mode...')", "x, y = data['img'], data['label'] # Increment the iteration counter", ": %+4.3f' % (key, metrics[key]) for key in metrics]) pbar.set_description(log)", "y.to(device) metrics = train(x, y) if not (state_dict['itr'] % config['log_interval']):", "state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str = datetime.datetime.fromtimestamp( 
eta).strftime('%H:%M:%S') log", "prepare it if config['ema']: print('Preparing EMA for G with decay", "config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] # By default, skip", "= nn.DataParallel(G) D = nn.DataParallel(D) # If using EMA, prepare", "<NAME>, and <NAME> (arXiv 1809.11096). Let's go. \"\"\" import datetime", "resuming training. if config['resume']: print('Skipping initialization for training resumption...') config['skip_init']", "skip init if resuming training. if config['resume']: print('Skipping initialization for", "counter at end of epoch state_dict['epoch'] += 1 def main():", "{} D: {}'.format( *[sum([p.data.nelement() for p in net.parameters()]) for net", "in training mode, just in case they got set to", "config['ema'] else None) # Prepare data; the Discriminator's batch size", "config['ema_decay'], config['ema_start']) else: G_ema, ema = None, None GD =", "device = 'cuda' # Seed RNG utils.seed_rng(config['seed']) # Prepare root", "This is for convenience, to add settings derived from the", "prepare the training function train = train_fns.create_train_fn( G, D, GD,", "derived from the user-specified # configuration into the config-dict (e.g.", "= dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'],", "print(log) # Save weights and copies as configured at specified", "pbar.set_description(log) # print(log) # Save weights and copies as configured", "as configured at specified interval if not (state_dict['itr'] % config['sample_every']):", "enough data to complete # a full D iteration (regardless", "= True config = utils.update_config_roots(config) device = 'cuda' # Seed", "config['experiment_name'] else 'generative_dog_images') print('Experiment name is %s' % 
experiment_name) G", "if not (state_dict['itr'] % config['save_every']): if config['G_eval_mode']: # print('Switching G", "settings derived from the user-specified # configuration into the config-dict", "for i, data in enumerate(loader): x, y = data['img'], data['label']", "in [G, D]])) # Prepare state dict, which holds things", "experiment_name, config['load_weights'] if config['load_weights'] else None, G_ema if config['ema'] else", "common import * # IMG_SIZE = 64 # IMG_SIZE_2 =", "/ {}] Ep {}, \".format( curr_time_str, eta_str, state_dict['itr'], total_iters, epoch)", "# Note that at every loader iteration we pass in", "BN, this shouldn't matter much. G.train() D.train() if config['ema']: G_ema.train()", "%+4.3f' % (key, metrics[key]) for key in metrics]) pbar.set_description(log) #", "config['G_eval_mode']: # print('Switching G to eval mode...') G.eval() train_fns.save_and_sample(G, D,", "gian da di) eta = ( total_iters - state_dict['itr']) //", "and <NAME> This code is an unofficial reimplementation of \"Large-Scale", "number of epochs, although we mostly track G iterations. pbar", "% (key, metrics[key]) for key in metrics]) pbar.set_description(log) # print(log)", "= IMG_SIZE config['n_classes'] = 1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] =", "from common import * # IMG_SIZE = 64 # IMG_SIZE_2", "(key, metrics[key]) for key in metrics]) pbar.set_description(log) # print(log) #", "utils.activation_dict[config['D_nl']] # By default, skip init if resuming training. 
if", "start_itr = state_dict['itr'] for epoch in range(state_dict['epoch'], config['num_epochs']): for i,", "(config['experiment_name'] if config['experiment_name'] else 'generative_dog_images') print('Experiment name is %s' %", "number of classes # and size of the images from", "in G G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples'] z_,", "config['ema_start']) else: G_ema, ema = None, None GD = BigGAN.G_D(G,", "utils.seed_rng(config['seed']) # Prepare root folders if necessary utils.prepare_root(config) # Setup", "is %s' % experiment_name) G = BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device)", "if config['experiment_name'] else 'generative_dog_images') print('Experiment name is %s' % experiment_name)", "= train(x, y) if not (state_dict['itr'] % config['log_interval']): curr_time =", "of classes # and size of the images from the", "and accumulations) D_batch_size = (config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations']) loaders", "G G_batch_size = max(config['G_batch_size'], config['batch_size']) num_samples = config['num_fixed_samples'] z_, y_", "For D, which typically doesn't have BN, this shouldn't matter", "are loaded, prepare the training function train = train_fns.create_train_fn( G,", "# IMG_SIZE = 64 # IMG_SIZE_2 = IMG_SIZE * 2", "'.join(['%s : %+4.3f' % (key, metrics[key]) for key in metrics])", "# quang duong / (quang duong da di / thoi", "train = train_fns.create_train_fn( G, D, GD, z_, y_, ema, state_dict,", "print('Preparing EMA for G with decay of {}'.format( config['ema_decay'])) G_ema", "the images from the dataset, passing in a pytorch object", "sampled label arrays # Allow for different batch sizes in", "da di / thoi gian da di) eta = (", "[G, D]])) # Prepare state dict, which holds things like", "(e.g. 
inferring the number of classes # and size of", "in enumerate(loader): x, y = data['img'], data['label'] # Increment the", "config['weights_root'], experiment_name, config['load_weights'] if config['load_weights'] else None, G_ema if config['ema']", "size is all that needs to be passed # to", "datetime import time import torch import dataset import BigGAN import", "eta).strftime('%H:%M:%S') log = \"[{}] [{}] [{} / {}] Ep {},", "di) eta = ( total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) /", "dict, which holds things like epoch # and itr #", "save_weight=True) pbar.update() # Increment epoch counter at end of epoch", "a full D iteration (regardless of number of D steps", "data['label'] # Increment the iteration counter state_dict['itr'] += 1 #", "if config['ema']: G_ema.train() x, y = x.to(device), y.to(device) metrics =", "= 1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] # By", "{'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config} # If", "num_samples = config['num_fixed_samples'] z_, y_ = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'],", "= utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() #", "config) print('Beginning training at epoch %d...' % state_dict['epoch']) start_time =", "add settings derived from the user-specified # configuration into the", "training mode, just in case they got set to eval", "the config-dict (e.g. 
inferring the number of classes # and", "G = nn.DataParallel(G) D = nn.DataParallel(D) # If using EMA,", "', '.join(['%s : %+4.3f' % (key, metrics[key]) for key in", "the iteration counter state_dict['itr'] += 1 # Make sure G", "fixed_y, state_dict, config, experiment_name, save_weight=False) if not (state_dict['itr'] % config['save_every']):", "data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] )", "= IMG_SIZE * 2 def run(config): # Update the config", "if config['ema']: print('Preparing EMA for G with decay of {}'.format(", "Prepare a fixed z & y to see individual sample", "GAN Training for High Fidelity Natural Image Synthesis,\" by <NAME>,", "[{}] [{} / {}] Ep {}, \".format( curr_time_str, eta_str, state_dict['itr'],", "y) if not (state_dict['itr'] % config['log_interval']): curr_time = timer.since_start() curr_time_str", "dict as necessary # This is for convenience, to add", "The Authorized Unofficial PyTorch release Code by <NAME> and <NAME>", "ema, state_dict, config) print('Beginning training at epoch %d...' 
% state_dict['epoch'])", "[{} / {}] Ep {}, \".format( curr_time_str, eta_str, state_dict['itr'], total_iters,", "= ( total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str", "model, load weights if config['resume']: print('Loading weights...') utils.load_weights(G, D, state_dict,", "the number of classes # and size of the images", "root folders if necessary utils.prepare_root(config) # Setup cudnn.benchmark for free", "Natural Image Synthesis,\" by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096).", "configured at specified interval if not (state_dict['itr'] % config['sample_every']): if", "state_dict, config, experiment_name, save_weight=False) if not (state_dict['itr'] % config['save_every']): if", "Synthesis,\" by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096). Let's go.", "* config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'],", "for specified number of epochs, although we mostly track G", "mmcv.Timer() timer.start() start_itr = state_dict['itr'] for epoch in range(state_dict['epoch'], config['num_epochs']):", "D = nn.DataParallel(D) # If using EMA, prepare it if", "Authorized Unofficial PyTorch release Code by <NAME> and <NAME> This", "weights if config['resume']: print('Loading weights...') utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name,", "*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D]]))", "curr_time).strftime('%H:%M:%S') # quang duong / (quang duong da di /", "weights...') utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name, config['load_weights'] if config['load_weights'] else", "in net.parameters()]) for net in [G, D]])) # Prepare state", "counter state_dict['itr'] += 1 # Make sure G and D", "+= 1 def main(): # parse command line and run", "By default, skip init if resuming 
training. if config['resume']: print('Skipping", "print('Skipping initialization for training resumption...') config['skip_init'] = True config =", "and randomly sampled label arrays # Allow for different batch", "of params in G: {} D: {}'.format( *[sum([p.data.nelement() for p", "torch.backends.cudnn.benchmark = True experiment_name = (config['experiment_name'] if config['experiment_name'] else 'generative_dog_images')", "mostly track G iterations. pbar = tqdm(total=total_iters) for _ in", "= x.to(device), y.to(device) metrics = train(x, y) if not (state_dict['itr']", "= config['num_epochs'] * len(loader) # Train for specified number of", "save_weight=False) if not (state_dict['itr'] % config['save_every']): if config['G_eval_mode']: # print('Switching", "x.to(device), y.to(device) metrics = train(x, y) if not (state_dict['itr'] %", "None, None GD = BigGAN.G_D(G, D) print(G) print(D) print('Number of", "train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name,", "and <NAME> (arXiv 1809.11096). Let's go. \"\"\" import datetime import", "iterations. 
pbar = tqdm(total=total_iters) for _ in range(state_dict['itr']): pbar.update() timer", "nn.DataParallel(G) D = nn.DataParallel(D) # If using EMA, prepare it", "else None, G_ema if config['ema'] else None) # Prepare data;", "- state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S')", "= BigGAN.G_D(G, D) print(G) print(D) print('Number of params in G:", "RNG utils.seed_rng(config['seed']) # Prepare root folders if necessary utils.prepare_root(config) #", "BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) # if config['parallel']: G = nn.DataParallel(G)", "a string) config['resolution'] = IMG_SIZE config['n_classes'] = 1 config['G_activation'] =", "tqdm(total=total_iters) for _ in range(state_dict['itr']): pbar.update() timer = mmcv.Timer() timer.start()", "training resumption...') config['skip_init'] = True config = utils.update_config_roots(config) device =", "# If using EMA, prepare it if config['ema']: print('Preparing EMA", "GD = BigGAN.G_D(G, D) print(G) print(D) print('Number of params in", "y = x.to(device), y.to(device) metrics = train(x, y) if not", "Discriminator's batch size is all that needs to be passed", "D, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=False)", "pass in enough data to complete # a full D", "y_, ema, state_dict, config) print('Beginning training at epoch %d...' 
%", "+= 1 # Make sure G and D are in", "num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders are", "Update the config dict as necessary # This is for", "folders if necessary utils.prepare_root(config) # Setup cudnn.benchmark for free speed", "state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}", "D are in training mode, just in case they got", "else None) # Prepare data; the Discriminator's batch size is", "# Increment the iteration counter state_dict['itr'] += 1 # Make", "convenience, to add settings derived from the user-specified # configuration", "fp16=config['G_fp16']) # Prepare a fixed z & y to see", "( total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1)) eta_str =", "the training function train = train_fns.create_train_fn( G, D, GD, z_,", "(quang duong da di / thoi gian da di) eta", "D, which typically doesn't have BN, this shouldn't matter much.", "# Update the config dict as necessary # This is", "else 'generative_dog_images') print('Experiment name is %s' % experiment_name) G =", "to be passed # to the dataloader, as G doesn't", "config} # If loading from a pre-trained model, load weights", "G_ema, config['ema_decay'], config['ema_start']) else: G_ema, ema = None, None GD", "to the dataloader, as G doesn't require dataloading. # Note", "# Allow for different batch sizes in G G_batch_size =", "of the images from the dataset, passing in a pytorch", "(curr_time+1)) eta_str = datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}] [{}] [{}", "although we mostly track G iterations. 
pbar = tqdm(total=total_iters) for", "= state_dict['itr'] for epoch in range(state_dict['epoch'], config['num_epochs']): for i, data", "data; the Discriminator's batch size is all that needs to", "copies as configured at specified interval if not (state_dict['itr'] %", "an unofficial reimplementation of \"Large-Scale GAN Training for High Fidelity", "for convenience, to add settings derived from the user-specified #", "This code is an unofficial reimplementation of \"Large-Scale GAN Training", "number of D steps and accumulations) D_batch_size = (config['batch_size'] *", "y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=False) if not (state_dict['itr']", "images from the dataset, passing in a pytorch object #", "num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z", "the config dict as necessary # This is for convenience,", "BigGAN.G_D(G, D) print(G) print(D) print('Number of params in G: {}", "None, G_ema if config['ema'] else None) # Prepare data; the", "inferring the number of classes # and size of the", "True config = utils.update_config_roots(config) device = 'cuda' # Seed RNG", "+= ', '.join(['%s : %+4.3f' % (key, metrics[key]) for key", "def run(config): # Update the config dict as necessary #", "config['load_weights'] if config['load_weights'] else None, G_ema if config['ema'] else None)", "_ in range(state_dict['itr']): pbar.update() timer = mmcv.Timer() timer.start() start_itr =", "= timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong /", "epoch counter at end of epoch state_dict['epoch'] += 1 def", "in G: {} D: {}'.format( *[sum([p.data.nelement() for p in net.parameters()])", "be passed # to the dataloader, as G doesn't require", "for free speed torch.backends.cudnn.benchmark = True experiment_name = (config['experiment_name'] if", "= None, None GD = BigGAN.G_D(G, D) print(G) print(D) print('Number", "eval # For D, 
which typically doesn't have BN, this", "= datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong / (quang duong da", "is an unofficial reimplementation of \"Large-Scale GAN Training for High", "load weights if config['resume']: print('Loading weights...') utils.load_weights(G, D, state_dict, config['weights_root'],", "% state_dict['epoch']) start_time = time.perf_counter() loader = loaders[0] total_iters =", "timer.start() start_itr = state_dict['itr'] for epoch in range(state_dict['epoch'], config['num_epochs']): for", "epoch %d...' % state_dict['epoch']) start_time = time.perf_counter() loader = loaders[0]", "pbar.update() # Increment epoch counter at end of epoch state_dict['epoch']", "as necessary # This is for convenience, to add settings", "G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=True) pbar.update()", "print(D) print('Number of params in G: {} D: {}'.format( *[sum([p.data.nelement()", "True experiment_name = (config['experiment_name'] if config['experiment_name'] else 'generative_dog_images') print('Experiment name", "# Train for specified number of epochs, although we mostly", "eta_str, state_dict['itr'], total_iters, epoch) log += ', '.join(['%s : %+4.3f'", "EMA, prepare it if config['ema']: print('Preparing EMA for G with", "much. 
G.train() D.train() if config['ema']: G_ema.train() x, y = x.to(device),", "noise and randomly sampled label arrays # Allow for different", "= time.perf_counter() loader = loaders[0] total_iters = config['num_epochs'] * len(loader)", "object # for the activation specified as a string) config['resolution']", "'cuda' # Seed RNG utils.seed_rng(config['seed']) # Prepare root folders if", "Unofficial PyTorch release Code by <NAME> and <NAME> This code", "randomly sampled label arrays # Allow for different batch sizes", "config['skip_init'] = True config = utils.update_config_roots(config) device = 'cuda' #", "# print('Switching G to eval mode...') G.eval() train_fns.save_and_sample(G, D, G_ema,", "# Seed RNG utils.seed_rng(config['seed']) # Prepare root folders if necessary", "= nn.DataParallel(G_ema) ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start']) else: G_ema,", "config['sample_every']): if config['G_eval_mode']: # print('Switching G to eval mode...') G.eval()", "'config': config} # If loading from a pre-trained model, load", "config = vars(parser.parse_args()) print(config) run(config) if __name__ == '__main__': main()", "necessary # This is for convenience, to add settings derived", "train(x, y) if not (state_dict['itr'] % config['log_interval']): curr_time = timer.since_start()", "of D steps and accumulations) D_batch_size = (config['batch_size'] * config['num_D_steps']", "D.train() if config['ema']: G_ema.train() x, y = x.to(device), y.to(device) metrics", "* config['num_D_steps'] * config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size,", "G and D are in training mode, just in case", "are in training mode, just in case they got set", "using EMA, prepare it if config['ema']: print('Preparing EMA for G", "{}, \".format( curr_time_str, eta_str, state_dict['itr'], total_iters, epoch) log += ',", "/ thoi gian da di) eta = ( total_iters -", "/ (quang 
duong da di / thoi gian da di)", "= True experiment_name = (config['experiment_name'] if config['experiment_name'] else 'generative_dog_images') print('Experiment", "= config['num_fixed_samples'] z_, y_ = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device,", "fixed_y = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_()", "training function train = train_fns.create_train_fn( G, D, GD, z_, y_,", "IMG_SIZE * 2 def run(config): # Update the config dict", "code is an unofficial reimplementation of \"Large-Scale GAN Training for", "from the dataset, passing in a pytorch object # for", "utils.activation_dict[config['G_nl']] config['D_activation'] = utils.activation_dict[config['D_nl']] # By default, skip init if", "loading from a pre-trained model, load weights if config['resume']: print('Loading", "fixed_z, fixed_y, state_dict, config, experiment_name, save_weight=False) if not (state_dict['itr'] %", "duong / (quang duong da di / thoi gian da", "Fidelity Natural Image Synthesis,\" by <NAME>, <NAME>, and <NAME> (arXiv", "D iteration (regardless of number of D steps and accumulations)", "config-dict (e.g. 
inferring the number of classes # and size", "True}).to(device) G_ema = nn.DataParallel(G_ema) ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])", "run(config): # Update the config dict as necessary # This", "D, GD, z_, y_, ema, state_dict, config) print('Beginning training at", "D steps and accumulations) D_batch_size = (config['batch_size'] * config['num_D_steps'] *", "eta = ( total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1))", "\".format( curr_time_str, eta_str, state_dict['itr'], total_iters, epoch) log += ', '.join(['%s", "individual sample evolution throghout training fixed_z, fixed_y = utils.prepare_z_y( num_samples,", "log += ', '.join(['%s : %+4.3f' % (key, metrics[key]) for", "config['ema']: print('Preparing EMA for G with decay of {}'.format( config['ema_decay']))", "%d...' % state_dict['epoch']) start_time = time.perf_counter() loader = loaders[0] total_iters", "params in G: {} D: {}'.format( *[sum([p.data.nelement() for p in", "device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders are loaded, prepare the", "in range(state_dict['itr']): pbar.update() timer = mmcv.Timer() timer.start() start_itr = state_dict['itr']", "utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed", "config['num_D_steps'] * config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'],", "= utils.ema(G, G_ema, config['ema_decay'], config['ema_start']) else: G_ema, ema = None,", "da di) eta = ( total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr)", "G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a fixed z &", "time.perf_counter() loader = loaders[0] total_iters = config['num_epochs'] * len(loader) #", "utils from common import * # IMG_SIZE = 64 #", "else: 
G_ema, ema = None, None GD = BigGAN.G_D(G, D)", "G_ema, ema = None, None GD = BigGAN.G_D(G, D) print(G)", "we mostly track G iterations. pbar = tqdm(total=total_iters) for _", "not (state_dict['itr'] % config['log_interval']): curr_time = timer.since_start() curr_time_str = datetime.datetime.fromtimestamp(", "label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'], mask_out=config['mask_out'] ) #", "which typically doesn't have BN, this shouldn't matter much. G.train()", "= {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config} #", "# to the dataloader, as G doesn't require dataloading. #", "Increment epoch counter at end of epoch state_dict['epoch'] += 1", "2 def run(config): # Update the config dict as necessary", "G_ema = BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema)", "state_dict['epoch'] += 1 def main(): # parse command line and", "{}] Ep {}, \".format( curr_time_str, eta_str, state_dict['itr'], total_iters, epoch) log", "config['num_D_accumulations']) loaders = dataset.get_data_loaders( data_root=config['data_root'], label_root=config['label_root'], batch_size=D_batch_size, num_workers=config['num_workers'], shuffle=config['shuffle'], pin_memory=config['pin_memory'],", "Train for specified number of epochs, although we mostly track", "utils.load_weights(G, D, state_dict, config['weights_root'], experiment_name, config['load_weights'] if config['load_weights'] else None,", "size of the images from the dataset, passing in a", "data to complete # a full D iteration (regardless of", "if config['load_weights'] else None, G_ema if config['ema'] else None) #", "if config['G_eval_mode']: # print('Switching G to eval mode...') G.eval() train_fns.save_and_sample(G,", "config['batch_size']) num_samples = config['num_fixed_samples'] z_, y_ = utils.prepare_z_y( 
num_samples, G.module.dim_z,", "pbar.update() timer = mmcv.Timer() timer.start() start_itr = state_dict['itr'] for epoch", "training at epoch %d...' % state_dict['epoch']) start_time = time.perf_counter() loader", "config['num_epochs'] * len(loader) # Train for specified number of epochs,", "for epoch in range(state_dict['epoch'], config['num_epochs']): for i, data in enumerate(loader):", "GD, z_, y_, ema, state_dict, config) print('Beginning training at epoch", "doesn't require dataloading. # Note that at every loader iteration", "require dataloading. # Note that at every loader iteration we", "to eval # For D, which typically doesn't have BN,", "import * # IMG_SIZE = 64 # IMG_SIZE_2 = IMG_SIZE", "full D iteration (regardless of number of D steps and", "print('Experiment name is %s' % experiment_name) G = BigGAN.Generator(**config).to(device) D", "Increment the iteration counter state_dict['itr'] += 1 # Make sure", "G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) fixed_z.sample_() fixed_y.sample_() # Loaders are loaded,", "for net in [G, D]])) # Prepare state dict, which", "release Code by <NAME> and <NAME> This code is an", "config['resolution'] = IMG_SIZE config['n_classes'] = 1 config['G_activation'] = utils.activation_dict[config['G_nl']] config['D_activation']", "if config['parallel']: G = nn.DataParallel(G) D = nn.DataParallel(D) # If", "G iterations. 
pbar = tqdm(total=total_iters) for _ in range(state_dict['itr']): pbar.update()", "command line and run parser = utils.prepare_parser() config = vars(parser.parse_args())", "= tqdm(total=total_iters) for _ in range(state_dict['itr']): pbar.update() timer = mmcv.Timer()", "BigGAN.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device) G_ema = nn.DataParallel(G_ema) ema =", "key in metrics]) pbar.set_description(log) # print(log) # Save weights and", "= BigGAN.Generator(**config).to(device) D = BigGAN.Discriminator(**config).to(device) # if config['parallel']: G =", "curr_time = timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong", "they got set to eval # For D, which typically", "train_fns.create_train_fn( G, D, GD, z_, y_, ema, state_dict, config) print('Beginning", "\"[{}] [{}] [{} / {}] Ep {}, \".format( curr_time_str, eta_str,", "IMG_SIZE = 64 # IMG_SIZE_2 = IMG_SIZE * 2 def", "pre-trained model, load weights if config['resume']: print('Loading weights...') utils.load_weights(G, D,", "BigGAN.Discriminator(**config).to(device) # if config['parallel']: G = nn.DataParallel(G) D = nn.DataParallel(D)", "like epoch # and itr # state_dict = {'itr': 0,", "iteration (regardless of number of D steps and accumulations) D_batch_size", "which holds things like epoch # and itr # state_dict", "& y to see individual sample evolution throghout training fixed_z,", "curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong / (quang duong", "D) print(G) print(D) print('Number of params in G: {} D:", "initialization for training resumption...') config['skip_init'] = True config = utils.update_config_roots(config)", "# Save weights and copies as configured at specified interval", "is for convenience, to add settings derived from the user-specified", "string) config['resolution'] = IMG_SIZE config['n_classes'] = 1 config['G_activation'] = 
utils.activation_dict[config['G_nl']]", "Let's go. \"\"\" import datetime import time import torch import", "def main(): # parse command line and run parser =", "import BigGAN import train_fns import utils from common import *", "duong da di / thoi gian da di) eta =", "y_ = utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare", "training. if config['resume']: print('Skipping initialization for training resumption...') config['skip_init'] =", "state_dict, config, experiment_name, save_weight=True) pbar.update() # Increment epoch counter at", "weights and copies as configured at specified interval if not", "accumulations) D_batch_size = (config['batch_size'] * config['num_D_steps'] * config['num_D_accumulations']) loaders =", "# This is for convenience, to add settings derived from", "Prepare root folders if necessary utils.prepare_root(config) # Setup cudnn.benchmark for", "config = utils.update_config_roots(config) device = 'cuda' # Seed RNG utils.seed_rng(config['seed'])", "interval if not (state_dict['itr'] % config['sample_every']): if config['G_eval_mode']: # print('Switching", "for G with decay of {}'.format( config['ema_decay'])) G_ema = BigGAN.Generator(**{**config,", "= datetime.datetime.fromtimestamp( eta).strftime('%H:%M:%S') log = \"[{}] [{}] [{} / {}]", "net.parameters()]) for net in [G, D]])) # Prepare state dict,", "configuration into the config-dict (e.g. inferring the number of classes", "config['parallel']: G = nn.DataParallel(G) D = nn.DataParallel(D) # If using", "* # IMG_SIZE = 64 # IMG_SIZE_2 = IMG_SIZE *", "init if resuming training. 
if config['resume']: print('Skipping initialization for training", "resumption...') config['skip_init'] = True config = utils.update_config_roots(config) device = 'cuda'", "= utils.update_config_roots(config) device = 'cuda' # Seed RNG utils.seed_rng(config['seed']) #", "(state_dict['itr'] % config['log_interval']): curr_time = timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S')", "'epoch': 0, 'save_num': 0, 'config': config} # If loading from", "= utils.prepare_z_y( num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16']) # Prepare a", "speed torch.backends.cudnn.benchmark = True experiment_name = (config['experiment_name'] if config['experiment_name'] else", "activation specified as a string) config['resolution'] = IMG_SIZE config['n_classes'] =", "z_, y_, ema, state_dict, config) print('Beginning training at epoch %d...'", "\"\"\" import datetime import time import torch import dataset import", "# if config['parallel']: G = nn.DataParallel(G) D = nn.DataParallel(D) #", "mode, just in case they got set to eval #", "timer.since_start() curr_time_str = datetime.datetime.fromtimestamp( curr_time).strftime('%H:%M:%S') # quang duong / (quang", "if not (state_dict['itr'] % config['log_interval']): curr_time = timer.since_start() curr_time_str =", "state_dict['itr'], total_iters, epoch) log += ', '.join(['%s : %+4.3f' %", "% config['save_every']): if config['G_eval_mode']: # print('Switching G to eval mode...')", "it if config['ema']: print('Preparing EMA for G with decay of" ]
[ "'%04x' % random.getrandbits(16), suffix) def deleteStaleFiles(): files = glob('%s/*' %", "OSError: traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]'", "def initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir): zipFile = '%s.zip' %", "FileUtil from django.conf import settings def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s'", "FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d", "in files: if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not", "not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d)", "by the #Administrator of the National Aeronautics and Space Administration.", "reserved. # __END_LICENSE__ import os import time import random import", "Government, as represented by the #Administrator of the National Aeronautics", "glob('%s/*' % settings.TMP_DIR) now = time.time() for f in files:", "unlink %s]' % f def makeTempDir(prefix): d = getTempName(prefix) if", "def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x'", "import os import time import random import shutil from glob", "from django.conf import settings def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' %", "return makeTempDir(prefix) def finishZipDir(zipDir): zipFile = '%s.zip' % zipDir oldDir", "'%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix) def", "2015, United States Government, as represented by the #Administrator of", "os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return", "prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix) 
def deleteStaleFiles(): files =", "django.conf import settings def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR,", "def makeTempDir(prefix): d = getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod", "suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16),", "National Aeronautics and Space Administration. #All rights reserved. # __END_LICENSE__", "% settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d def initZipDir(prefix): return makeTempDir(prefix)", "States Government, as represented by the #Administrator of the National", "%s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d def initZipDir(prefix): return", "%s]' % f def makeTempDir(prefix): d = getTempName(prefix) if not", "import random import shutil from glob import glob import traceback", "United States Government, as represented by the #Administrator of the", "and not f.endswith('/README.txt')): try: os.unlink(f) except OSError: traceback.print_exc() print >>", "<reponame>geocam/geocamUtilWeb<gh_stars>1-10 # __BEGIN_LICENSE__ #Copyright (c) 2015, United States Government, as", "finishZipDir(zipDir): zipFile = '%s.zip' % zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR))", "not unlink %s]' % f def makeTempDir(prefix): d = getTempName(prefix)", "suffix) def deleteStaleFiles(): files = glob('%s/*' % settings.TMP_DIR) now =", "rights reserved. # __END_LICENSE__ import os import time import random", "d def initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir): zipFile = '%s.zip'", "Aeronautics and Space Administration. #All rights reserved. 
# __END_LICENSE__ import", "if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' % settings.TMP_DIR) deleteStaleFiles()", "settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d def initZipDir(prefix): return makeTempDir(prefix) def", "import time import random import shutil from glob import glob", "shutil from glob import glob import traceback import sys from", "sys from geocamUtil import FileUtil from django.conf import settings def", "os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): try: os.unlink(f) except OSError:", "def deleteStaleFiles(): files = glob('%s/*' % settings.TMP_DIR) now = time.time()", "the #Administrator of the National Aeronautics and Space Administration. #All", "from geocamUtil import FileUtil from django.conf import settings def getTempName(prefix,", "makeTempDir(prefix) def finishZipDir(zipDir): zipFile = '%s.zip' % zipDir oldDir =", "f.endswith('/README.txt')): try: os.unlink(f) except OSError: traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles:", "__END_LICENSE__ import os import time import random import shutil from", "-r %s %s' % (zipFile, os.path.basename(zipDir))) os.chdir(oldDir) shutil.rmtree(zipDir) return zipFile", "random import shutil from glob import glob import traceback import", "files = glob('%s/*' % settings.TMP_DIR) now = time.time() for f", "now = time.time() for f in files: if (now -", "initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir): zipFile = '%s.zip' % zipDir", "#All rights reserved. 
# __END_LICENSE__ import os import time import", "os import time import random import shutil from glob import", "(settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix) def deleteStaleFiles(): files", "random.getrandbits(16), suffix) def deleteStaleFiles(): files = glob('%s/*' % settings.TMP_DIR) now", "d = getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s'", "import sys from geocamUtil import FileUtil from django.conf import settings", "from glob import glob import traceback import sys from geocamUtil", "FileUtil.mkdirP(d) return d def initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir): zipFile", "import settings def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix,", "makeTempDir(prefix): d = getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw", "of the National Aeronautics and Space Administration. 
#All rights reserved.", "import traceback import sys from geocamUtil import FileUtil from django.conf", "- os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): try: os.unlink(f) except", "#Copyright (c) 2015, United States Government, as represented by the", "deleteStaleFiles() FileUtil.mkdirP(d) return d def initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir):", "zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s' %", "not f.endswith('/README.txt')): try: os.unlink(f) except OSError: traceback.print_exc() print >> sys.stderr,", "go+rw %s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d def initZipDir(prefix):", "return d def initZipDir(prefix): return makeTempDir(prefix) def finishZipDir(zipDir): zipFile =", "# __BEGIN_LICENSE__ #Copyright (c) 2015, United States Government, as represented", "print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f", "the National Aeronautics and Space Administration. #All rights reserved. #", "glob import traceback import sys from geocamUtil import FileUtil from", "% settings.TMP_DIR) now = time.time() for f in files: if", "= getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' %", "def finishZipDir(zipDir): zipFile = '%s.zip' % zipDir oldDir = os.getcwd()", "#Administrator of the National Aeronautics and Space Administration. 
#All rights", "os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir))) os.chdir(oldDir)", "settings def getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'),", "% f def makeTempDir(prefix): d = getTempName(prefix) if not os.path.exists(settings.TMP_DIR):", "except OSError: traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink", "= time.time() for f in files: if (now - os.stat(f).st_ctime", "as represented by the #Administrator of the National Aeronautics and", "oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s' % (zipFile,", "__BEGIN_LICENSE__ #Copyright (c) 2015, United States Government, as represented by", "traceback import sys from geocamUtil import FileUtil from django.conf import", "Space Administration. #All rights reserved. # __END_LICENSE__ import os import", "f def makeTempDir(prefix): d = getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR)", "time import random import shutil from glob import glob import", "getTempName(prefix, suffix=''): return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' %", "'%s.zip' % zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s", "(c) 2015, United States Government, as represented by the #Administrator", "# __END_LICENSE__ import os import time import random import shutil", "'[tempfiles.deleteStaleFiles: could not unlink %s]' % f def makeTempDir(prefix): d", "for f in files: if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS", "represented by the #Administrator of the National Aeronautics and Space", "geocamUtil import FileUtil from django.conf import settings def getTempName(prefix, suffix=''):", "sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f def 
makeTempDir(prefix):", "= glob('%s/*' % settings.TMP_DIR) now = time.time() for f in", "zipFile = '%s.zip' % zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip", "deleteStaleFiles(): files = glob('%s/*' % settings.TMP_DIR) now = time.time() for", "os.unlink(f) except OSError: traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not", ">> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f def", "% random.getrandbits(16), suffix) def deleteStaleFiles(): files = glob('%s/*' % settings.TMP_DIR)", "settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): try: os.unlink(f) except OSError: traceback.print_exc() print", "glob import glob import traceback import sys from geocamUtil import", "import glob import traceback import sys from geocamUtil import FileUtil", "Administration. #All rights reserved. # __END_LICENSE__ import os import time", "if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): try:", "time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix) def deleteStaleFiles(): files = glob('%s/*'", "f in files: if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and", "% (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix) def deleteStaleFiles():", "os.system('chmod go+rw %s' % settings.TMP_DIR) deleteStaleFiles() FileUtil.mkdirP(d) return d def", "could not unlink %s]' % f def makeTempDir(prefix): d =", "getTempName(prefix) if not os.path.exists(settings.TMP_DIR): FileUtil.mkdirP(settings.TMP_DIR) os.system('chmod go+rw %s' % settings.TMP_DIR)", "> settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): try: os.unlink(f) except OSError: traceback.print_exc()", "(now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')): 
try: os.unlink(f)", "% zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s'", "os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir))) os.chdir(oldDir) shutil.rmtree(zipDir) return", "traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' %", "and Space Administration. #All rights reserved. # __END_LICENSE__ import os", "import shutil from glob import glob import traceback import sys", "return '%s/%s-%s-%s%s' % (settings.TMP_DIR, prefix, time.strftime('%Y-%m-%d-%H%M'), '%04x' % random.getrandbits(16), suffix)", "= os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir)))", "= '%s.zip' % zipDir oldDir = os.getcwd() os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r", "import FileUtil from django.conf import settings def getTempName(prefix, suffix=''): return", "time.time() for f in files: if (now - os.stat(f).st_ctime >", "files: if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and not f.endswith('/README.txt')):", "os.chdir(os.path.dirname(settings.TMP_DIR)) os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir))) os.chdir(oldDir) shutil.rmtree(zipDir)", "settings.TMP_DIR) now = time.time() for f in files: if (now", "try: os.unlink(f) except OSError: traceback.print_exc() print >> sys.stderr, '[tempfiles.deleteStaleFiles: could" ]
[ "test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize = 16", "self.heapList[mc] = tmp i = mc def minChild(self,i): if i", "= self.minChild(i)#mc is the index of the smallest if self.heapList[i]", "dépôt git par email.\"\"\" import hypothesis from hypothesis import given,", "2] self.heapList[i // 2] = self.heapList[i] self.heapList[i] = tmp i", "mc = self.minChild(i)#mc is the index of the smallest if", "= 18 tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8] >=", "+ 1 def percDown(self,i): while (i * 2) < self.currentSize:#while", "tester jusqu'à atteindre 100% de couverture; # - corriger les", "self.currentSize = self.currentSize + 1 def percDown(self,i): while (i *", "else: return i * 2 + 1 def delMin(self): try:", "elements assert x == sorted(L + K)#verifie qu'on a bien", "test_insert(L,i): tau = BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def", "par email.\"\"\" import hypothesis from hypothesis import given, settings from", "test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas construit avec L for k", "1 def delMin(self): try: rval = self.heapList[1] except IndexError: print(\"Empty", "a new value into the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize =", "liste heapList (invariant) def percUp(self,i): #upward percolation until 0 reached", ">= tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L):", "x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau = BinHeap() assert", "= [] tau.buildHeap(K) for l in L:#teste si 1 suite", "== gamma[:] tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList) assert tau.heapList ==", "2 >= self.currentSize or i == 0: print(\"No Child. 
None", "tau.currentsize = 16 tau.heapList = gamma[:] tau.percUp(15) assert tau.heapList ==", "@given(integers()) @settings(max_examples=100) def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap()", "whole heap from a list, by percolating all its elements", "gamma[:] tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3]", "self.currentSize = len(alist) + 1# + 1 self.heapList = [0]", "assert_goodheap(tau,len(L)+1) #for x in range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers())", "1): for _ in range(len(L)): tau.percDown(x) #then we test that", "de tas binaires d'entiers def __init__(self): #initialise un tas binaire", "self.heapList = [0] self.currentSize = 1#taille de la liste heapList", "2 + 1 >= self.currentSize: return i * 2 else:", "tau = BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i):", "1 == lon and HL[2*x] >= val) or (HL[2*x] >=", "returned.\") return if i * 2 + 1 >= self.currentSize:", "* 2 + 1 > lon) or (x * 2", "hypothesis.strategies import integers, lists class BinHeap: #structure de tas binaires", "la liste heapList (invariant) def percUp(self,i): #upward percolation until 0", "heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L += [10] tau", "len(L) + 1 assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x", "until 0 reached or father is bigger while i //", "tau.currentSize == 1 @given(integers()) @settings(max_examples=100) def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3]", "le [:] while (i < self.currentSize): self.percUp(i) i += 1", "for x in range(1,len(L) + 1): for _ in range(len(L)):", "tau = BinHeap() assert 
tau.heapList == [0] assert tau.currentSize ==", "val)) def assert_goodheap(tau,lon): for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init():", "into the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize = self.currentSize + 1", "is the index of the smallest if self.heapList[i] > self.heapList[mc]:", "assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in range(1,len(L) +", "contient (au moins) cinq erreurs. # Instructions: # - tester", "of the smallest if self.heapList[i] > self.heapList[mc]: tmp = self.heapList[i]", "[0] assert tau.currentSize == 1 @given(integers()) @settings(max_examples=100) def test_percup(integer): gamma", "tau.buildHeap(L) assert tau.currentSize == len(L) + 1 assert sorted(tau.heapList) ==", "erreurs. # Instructions: # - tester jusqu'à atteindre 100% de", "assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L += [10] tau =", "tau.percUp(15) print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16", "from hypothesis.strategies import integers, lists class BinHeap: #structure de tas", "self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval def buildHeap(self,alist): #creates a whole", "= 1 self.currentSize = len(alist) + 1# + 1 self.heapList", "test_init(): tau = BinHeap() assert tau.heapList == [0] assert tau.currentSize", "tau.delMin() is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert", "email.\"\"\" import hypothesis from hypothesis import given, settings from hypothesis.strategies", "self.percDown(1) return rval def buildHeap(self,alist): #creates a whole heap from", "d'entiers avec un element 0 self.heapList = [0] self.currentSize =", "2 + 1 == lon and HL[2*x] >= val) or", "tau.heapList = gamma[:] tau.percUp(15) 
assert tau.heapList == gamma[:] tau.heapList[15] =", "= len(alist) + 1# + 1 self.heapList = [0] +", "def test_delmin(L): L += [10] tau = BinHeap() assert tau.delMin()", "Ce fichier contient (au moins) cinq erreurs. # Instructions: #", "val and HL[2*x+1] >= val)) def assert_goodheap(tau,lon): for x in", "= self.currentSize + 1 def percDown(self,i): while (i * 2)", "lon and HL[2*x] >= val) or (HL[2*x] >= val and", "self.heapList[i] self.heapList[i] = tmp i //= 2 def insert(self,k): #inserting", "tau.percUp(15) assert tau.heapList == gamma[:] tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList)", "#then we test that we got a well-ordered heap assert_goodheap(tau,len(L)+1)", "enlever le [:] while (i < self.currentSize): self.percUp(i) i +=", "= tmp i //= 2 def insert(self,k): #inserting a new", "== sorted(L + K)#verifie qu'on a bien le minimum avec", "test that we got a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None)", "tau.percDown(x) #then we test that we got a well-ordered heap", "def test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas construit avec L for", "2 > 0 and self.heapList[i] < self.heapList[i // 2]: tmp", "sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)", "rajoute les elements de K assert_goodheap(tau,tau.currentSize) x = [] while", "< self.currentSize): self.percUp(i) i += 1 def assert_isheaplist(x,val,lon,HL): assert ((x", "test_build(L): tau = BinHeap() tau.buildHeap(L) assert tau.currentSize == len(L) +", "is returned.\") return self.currentSize = self.currentSize - 1 self.heapList[1] =", "a bien le minimum avec delmin assert tau.delMin() is None", "return i * 2 else: if self.heapList[i*2] < self.heapList[i*2+1]: return", "print(\"Empty heap. Nothing is changed. 
None is returned.\") return self.currentSize", "self.heapList[i // 2] self.heapList[i // 2] = self.heapList[i] self.heapList[i] =", "def delMin(self): try: rval = self.heapList[1] except IndexError: print(\"Empty heap.", "# - tester jusqu'à atteindre 100% de couverture; # -", "i * 2 + 1 >= self.currentSize: return i *", "[:] while (i < self.currentSize): self.percUp(i) i += 1 def", "tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers()))", "1 suite d'insertion/ suppression maintient la structure tau.delMin() tau.insert(l) assert_goodheap(tau,tau.currentSize)", "0: print(\"No Child. None is returned.\") return if i *", "and HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1]", "tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8] assert", "test_delmin(L): L += [10] tau = BinHeap() assert tau.delMin() is", "[10] tau = BinHeap() assert tau.delMin() is None tau.buildHeap(L) #print(L)", "+ 1 > lon) or (x * 2 + 1", "// 2] self.heapList[i // 2] = self.heapList[i] self.heapList[i] = tmp", "return rval def buildHeap(self,alist): #creates a whole heap from a", "x = [] while tau.currentSize > 1:x.append(tau.delMin())#on retire tous les", "tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17)", "assert tau.currentsize == 16 tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16) assert", "= self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval def buildHeap(self,alist): #creates a", "heap. Nothing is changed. 
None is returned.\") return self.currentSize =", "tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau =", "+ 1 >= self.currentSize: return i * 2 else: if", "= self.heapList[1] except IndexError: print(\"Empty heap. Nothing is changed. None", "tau = BinHeap() tau.currentsize = 16 tau.heapList = gamma[:] tau.percUp(15)", "@settings(max_examples=100) def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize", "the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize = self.currentSize + 1 def", "= 17 tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize =", "2 + 1 def delMin(self): try: rval = self.heapList[1] except", "i * 2 else: return i * 2 + 1", "self.currentSize = 1#taille de la liste heapList (invariant) def percUp(self,i):", "2] = self.heapList[i] self.heapList[i] = tmp i //= 2 def", "qu'on a bien le minimum avec delmin assert tau.delMin() is", "None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas construit", "d'entiers def __init__(self): #initialise un tas binaire d'entiers avec un", "# Instructions: # - tester jusqu'à atteindre 100% de couverture;", "tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def", ">= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau = BinHeap() tau.buildHeap(L)", "== lon and HL[2*x] >= val) or (HL[2*x] >= val", "tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer)", "None is returned.\") return if i * 2 + 1", "//= 2 def 
insert(self,k): #inserting a new value into the", "integers, lists class BinHeap: #structure de tas binaires d'entiers def", "def assert_isheaplist(x,val,lon,HL): assert ((x * 2 + 1 > lon)", "assert tau.delMin() is None x = [] tau.buildHeap(K) for l", "smallest if self.heapList[i] > self.heapList[mc]: tmp = self.heapList[i] self.heapList[i] =", "def test_percDown(L,i): tau = BinHeap() L += [10] tau.buildHeap(L) tau.heapList[1]", "tau = BinHeap() tau.buildHeap(L) assert tau.currentSize == len(L) + 1", "i = mc def minChild(self,i): if i * 2 >=", "x = [] tau.buildHeap(K) for l in L:#teste si 1", "print(\"No Child. None is returned.\") return if i * 2", "while (i < self.currentSize): self.percUp(i) i += 1 def assert_isheaplist(x,val,lon,HL):", "[] while tau.currentSize > 1:x.append(tau.delMin())#on retire tous les elements assert", "de la liste heapList (invariant) def percUp(self,i): #upward percolation until", "def test_insert(L,i): tau = BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100)", "+ 1 self.heapList = [0] + alist # enlever le", "BinHeap() L += [10] tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1) for", "test_percDown(L,i): tau = BinHeap() L += [10] tau.buildHeap(L) tau.heapList[1] =", "= BinHeap() tau.currentsize = 16 tau.heapList = gamma[:] tau.percUp(15) assert", "in range(len(L)): tau.percDown(x) #then we test that we got a", "< self.heapList[i // 2]: tmp = self.heapList[i // 2] self.heapList[i", "gamma[:] tau.percUp(15) assert tau.heapList == gamma[:] tau.heapList[15] = 2 tau.percUp(15)", "not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas", "* 2) < self.currentSize:#while I have a child mc =", "is changed. 
None is returned.\") return self.currentSize = self.currentSize -", "+ 1 == lon and HL[2*x] >= val) or (HL[2*x]", "in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau = BinHeap() assert tau.heapList", "100% de couverture; # - corriger les bugs;\" # -", "tau.heapList == gamma[:] tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList) assert tau.heapList", "i * 2 + 1 def delMin(self): try: rval =", "ou le dépôt git par email.\"\"\" import hypothesis from hypothesis", "None x = [] tau.buildHeap(K) for l in L:#teste si", "assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau = BinHeap() L +=", "#print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def", "== sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in range(1,len(L) + 1): #", "\", tau.heapList,\"\\n\") assert tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i):", "return self.currentSize = self.currentSize - 1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop()", "#upward percolation until 0 reached or father is bigger while", "si 1 suite d'insertion/ suppression maintient la structure tau.delMin() tau.insert(l)", "BinHeap: #structure de tas binaires d'entiers def __init__(self): #initialise un", "les elements assert x == sorted(L + K)#verifie qu'on a", "minChild(self,i): if i * 2 >= self.currentSize or i ==", "HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1] >=", "def percUp(self,i): #upward percolation until 0 reached or father is", "a list, by percolating all its elements i = 1", "(HL[2*x] >= val and HL[2*x+1] >= val)) def assert_goodheap(tau,lon): for", "self.currentSize - 1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval", "@settings(max_examples=100) 
def test_percDown(L,i): tau = BinHeap() L += [10] tau.buildHeap(L)", "[0] self.currentSize = 1#taille de la liste heapList (invariant) def", "* 2 + 1 >= self.currentSize: return i * 2", "assert tau.currentSize == 1 @given(integers()) @settings(max_examples=100) def test_percup(integer): gamma =", "self.heapList[i // 2] = self.heapList[i] self.heapList[i] = tmp i //=", "BinHeap() tau.buildHeap(L)#tas construit avec L for k in K:tau.insert(k)#on rajoute", "changed. None is returned.\") return self.currentSize = self.currentSize - 1", "tous les elements assert x == sorted(L + K)#verifie qu'on", "rval = self.heapList[1] except IndexError: print(\"Empty heap. Nothing is changed.", "16 tau.heapList = gamma[:] tau.percUp(15) assert tau.heapList == gamma[:] tau.heapList[15]", "settings from hypothesis.strategies import integers, lists class BinHeap: #structure de", "== 1 @given(integers()) @settings(max_examples=100) def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau", "corriger les bugs;\" # - envoyer le diff ou le", "K:tau.insert(k)#on rajoute les elements de K assert_goodheap(tau,tau.currentSize) x = []", "+ 1): for _ in range(len(L)): tau.percDown(x) #then we test", "+ 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau =", "# assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau = BinHeap() tau.buildHeap(L)", "tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K):", "well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L += [10]", "IndexError: print(\"Empty heap. Nothing is changed. 
None is returned.\") return", "# - corriger les bugs;\" # - envoyer le diff", "i = 1 self.currentSize = len(alist) + 1# + 1", "[10] tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1) for x in range(1,len(L)", "while tau.currentSize > 1:x.append(tau.delMin())#on retire tous les elements assert x", "def test_minChild(L,i): tau = BinHeap() assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1])", "= BinHeap() assert tau.delMin() is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU", "tau = BinHeap() L += [10] tau.buildHeap(L) tau.heapList[1] = i", "* 2 else: return i * 2 + 1 def", "les elements de K assert_goodheap(tau,tau.currentSize) x = [] while tau.currentSize", "rval def buildHeap(self,alist): #creates a whole heap from a list,", "if self.heapList[i*2] < self.heapList[i*2+1]: return i * 2 else: return", "fichier contient (au moins) cinq erreurs. # Instructions: # -", "envoyer le diff ou le dépôt git par email.\"\"\" import", "while (i * 2) < self.currentSize:#while I have a child", "and self.heapList[i] < self.heapList[i // 2]: tmp = self.heapList[i //", "index of the smallest if self.heapList[i] > self.heapList[mc]: tmp =", "binaire d'entiers avec un element 0 self.heapList = [0] self.currentSize", "i * 2 >= self.currentSize or i == 0: print(\"No", "= BinHeap() tau.buildHeap(L) assert tau.currentSize == len(L) + 1 assert", "i //= 2 def insert(self,k): #inserting a new value into", "element 0 self.heapList = [0] self.currentSize = 1#taille de la", "[0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16 tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16)", "test_minChild(L,i): tau = BinHeap() assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert", "cinq erreurs. 
# Instructions: # - tester jusqu'à atteindre 100%", "= mc def minChild(self,i): if i * 2 >= self.currentSize", "1 >= self.currentSize: return i * 2 else: if self.heapList[i*2]", "#print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin() == min(L) @given(lists(elements=integers()),integers())", "delmin assert tau.delMin() is None x = [] tau.buildHeap(K) for", "percolation until 0 reached or father is bigger while i", "assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17) assert", "* 2 + 1 == lon and HL[2*x] >= val)", "reached or father is bigger while i // 2 >", "self.minChild(i)#mc is the index of the smallest if self.heapList[i] >", "lists class BinHeap: #structure de tas binaires d'entiers def __init__(self):", "2) < self.currentSize:#while I have a child mc = self.minChild(i)#mc", "# enlever le [:] while (i < self.currentSize): self.percUp(i) i", "= self.heapList[mc] self.heapList[mc] = tmp i = mc def minChild(self,i):", "tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17) assert tau.heapList[17]", "got a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L", "tmp = self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc] = tmp i", "couverture; # - corriger les bugs;\" # - envoyer le", "0 reached or father is bigger while i // 2", "Instructions: # - tester jusqu'à atteindre 100% de couverture; #", "2 else: if self.heapList[i*2] < self.heapList[i*2+1]: return i * 2", "#structure de tas binaires d'entiers def __init__(self): #initialise un tas", "BinHeap() tau.currentsize = 16 tau.heapList = gamma[:] tau.percUp(15) assert tau.heapList", "== min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau = BinHeap() 
assert", "or (HL[2*x] >= val and HL[2*x+1] >= val)) def assert_goodheap(tau,lon):", "avec un element 0 self.heapList = [0] self.currentSize = 1#taille", "self.heapList[i] = tmp i //= 2 def insert(self,k): #inserting a", "tau.currentSize > 1:x.append(tau.delMin())#on retire tous les elements assert x ==", "self.heapList = [0] + alist # enlever le [:] while", "except IndexError: print(\"Empty heap. Nothing is changed. None is returned.\")", "2]: tmp = self.heapList[i // 2] self.heapList[i // 2] =", "self.heapList[mc]: tmp = self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc] = tmp", "if self.heapList[i] > self.heapList[mc]: tmp = self.heapList[i] self.heapList[i] = self.heapList[mc]", "self.currentSize + 1 def percDown(self,i): while (i * 2) <", "#for x in range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000)", "assert tau.delMin() is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\")", "(i * 2) < self.currentSize:#while I have a child mc", "BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau =", "L += [10] tau = BinHeap() assert tau.delMin() is None", "tau.delMin() is None x = [] tau.buildHeap(K) for l in", "de couverture; # - corriger les bugs;\" # - envoyer", "self.currentSize): self.percUp(i) i += 1 def assert_isheaplist(x,val,lon,HL): assert ((x *", "+= [10] tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1) for x in", "avec L for k in K:tau.insert(k)#on rajoute les elements de", "l in L:#teste si 1 suite d'insertion/ suppression maintient la", "1# + 1 self.heapList = [0] + alist # enlever", "is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin()", "= [] while tau.currentSize > 
1:x.append(tau.delMin())#on retire tous les elements", "else: if self.heapList[i*2] < self.heapList[i*2+1]: return i * 2 else:", "@settings(max_examples=400,deadline=None) def test_delmin(L): L += [10] tau = BinHeap() assert", "the smallest if self.heapList[i] > self.heapList[mc]: tmp = self.heapList[i] self.heapList[i]", "self.heapList[mc] self.heapList[mc] = tmp i = mc def minChild(self,i): if", "self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval def buildHeap(self,alist): #creates", "self.heapList[i] < self.heapList[i // 2]: tmp = self.heapList[i // 2]", "or i == 0: print(\"No Child. None is returned.\") return", "jusqu'à atteindre 100% de couverture; # - corriger les bugs;\"", "sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in range(1,len(L) + 1):", "self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc] = tmp i = mc", "tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau = BinHeap()", "range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau = BinHeap() assert tau.heapList ==", "heap from a list, by percolating all its elements i", "@settings(max_examples=400,deadline=None) def test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas construit avec L", "is bigger while i // 2 > 0 and self.heapList[i]", "1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval def buildHeap(self,alist):", "2 tau.percUp(15) print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize ==", "percolating all its elements i = 1 self.currentSize = len(alist)", "print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16 tau.heapList.append(8)", "gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize = 16 
tau.heapList", "bien le minimum avec delmin assert tau.delMin() is None x", "min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau = BinHeap() assert tau.minChild(abs(i))", "BinHeap() assert tau.heapList == [0] assert tau.currentSize == 1 @given(integers())", "hypothesis from hypothesis import given, settings from hypothesis.strategies import integers,", "le dépôt git par email.\"\"\" import hypothesis from hypothesis import", "class BinHeap: #structure de tas binaires d'entiers def __init__(self): #initialise", "def assert_goodheap(tau,lon): for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau", "1 self.currentSize = len(alist) + 1# + 1 self.heapList =", "* 2 else: if self.heapList[i*2] < self.heapList[i*2+1]: return i *", "< self.currentSize:#while I have a child mc = self.minChild(i)#mc is", "self.currentSize: return i * 2 else: if self.heapList[i*2] < self.heapList[i*2+1]:", "list, by percolating all its elements i = 1 self.currentSize", "// 2] = self.heapList[i] self.heapList[i] = tmp i //= 2", "tau.heapList[1] = i tau.percDown(1) for x in range(1,len(L) + 1):", "== len(L) + 1 assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for", "def insert(self,k): #inserting a new value into the heap self.heapList.append(k)", "assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16 tau.heapList.append(8) tau.currentsize", "2 else: return i * 2 + 1 def delMin(self):", "self.currentSize:#while I have a child mc = self.minChild(i)#mc is the", "= [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize = 16 tau.heapList =", "tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau", "assert_goodheap(tau,tau.currentSize) x = [] while tau.currentSize > 1:x.append(tau.delMin())#on retire tous", "return i * 
2 + 1 def delMin(self): try: rval", "HL[2*x+1] >= val)) def assert_goodheap(tau,lon): for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)", "tau = BinHeap() assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1)", "self.heapList[i*2+1]: return i * 2 else: return i * 2", "tau.percDown(1) for x in range(1,len(L) + 1): for _ in", "import given, settings from hypothesis.strategies import integers, lists class BinHeap:", "tau.buildHeap(K) for l in L:#teste si 1 suite d'insertion/ suppression", "[] tau.buildHeap(K) for l in L:#teste si 1 suite d'insertion/", "i += 1 def assert_isheaplist(x,val,lon,HL): assert ((x * 2 +", "i * 2 else: if self.heapList[i*2] < self.heapList[i*2+1]: return i", "= self.heapList[i] self.heapList[i] = tmp i //= 2 def insert(self,k):", "1 > lon) or (x * 2 + 1 ==", "2 + 1 > lon) or (x * 2 +", "= self.currentSize - 1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return", "value into the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize = self.currentSize +", "[0] + alist # enlever le [:] while (i <", "* 2 >= self.currentSize or i == 0: print(\"No Child.", "les bugs;\" # - envoyer le diff ou le dépôt", "assert tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau =", "atteindre 100% de couverture; # - corriger les bugs;\" #", "from hypothesis import given, settings from hypothesis.strategies import integers, lists", "tau.heapList == [0] assert tau.currentSize == 1 @given(integers()) @settings(max_examples=100) def", "+ K)#verifie qu'on a bien le minimum avec delmin assert", "tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16 tau.heapList.append(8) tau.currentsize =", "1 @given(integers()) @settings(max_examples=100) def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau =", "L:#teste 
si 1 suite d'insertion/ suppression maintient la structure tau.delMin()", "BinHeap() assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not", "minimum avec delmin assert tau.delMin() is None x = []", "(au moins) cinq erreurs. # Instructions: # - tester jusqu'à", "BinHeap() assert tau.delMin() is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \",", "from a list, by percolating all its elements i =", "by percolating all its elements i = 1 self.currentSize =", "retire tous les elements assert x == sorted(L + K)#verifie", "for l in L:#teste si 1 suite d'insertion/ suppression maintient", "returned.\") return self.currentSize = self.currentSize - 1 self.heapList[1] = self.heapList[self.currentSize]", "== 16 tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16) assert tau.heapList ==", "the index of the smallest if self.heapList[i] > self.heapList[mc]: tmp", "= BinHeap() tau.buildHeap(L)#tas construit avec L for k in K:tau.insert(k)#on", "child mc = self.minChild(i)#mc is the index of the smallest", "we test that we got a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()))", "None is returned.\") return self.currentSize = self.currentSize - 1 self.heapList[1]", "percDown(self,i): while (i * 2) < self.currentSize:#while I have a", "#inserting a new value into the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize", "1 def assert_isheaplist(x,val,lon,HL): assert ((x * 2 + 1 >", "#initialise un tas binaire d'entiers avec un element 0 self.heapList", "len(alist) + 1# + 1 self.heapList = [0] + alist", "#print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400)", "assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not None", "= [0] self.currentSize = 1#taille de la liste heapList 
(invariant)", "have a child mc = self.minChild(i)#mc is the index of", "hypothesis import given, settings from hypothesis.strategies import integers, lists class", "tmp i = mc def minChild(self,i): if i * 2", "range(len(L)): tau.percDown(x) #then we test that we got a well-ordered", "BinHeap() tau.buildHeap(L) assert tau.currentSize == len(L) + 1 assert sorted(tau.heapList)", "def __init__(self): #initialise un tas binaire d'entiers avec un element", "= self.heapList[i // 2] self.heapList[i // 2] = self.heapList[i] self.heapList[i]", "= gamma[:] tau.percUp(15) assert tau.heapList == gamma[:] tau.heapList[15] = 2", "elements de K assert_goodheap(tau,tau.currentSize) x = [] while tau.currentSize >", "in range(1,len(L) + 1): for _ in range(len(L)): tau.percDown(x) #then", "while i // 2 > 0 and self.heapList[i] < self.heapList[i", "le minimum avec delmin assert tau.delMin() is None x =", "self.heapList[i*2] < self.heapList[i*2+1]: return i * 2 else: return i", "tau = BinHeap() assert tau.delMin() is None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\")", "tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau = BinHeap()", "bigger while i // 2 > 0 and self.heapList[i] <", "- tester jusqu'à atteindre 100% de couverture; # - corriger", "- corriger les bugs;\" # - envoyer le diff ou", "tmp i //= 2 def insert(self,k): #inserting a new value", ">= self.currentSize: return i * 2 else: if self.heapList[i*2] <", "elements i = 1 self.currentSize = len(alist) + 1# +", "+ 1 def delMin(self): try: rval = self.heapList[1] except IndexError:", "self.heapList[1] except IndexError: print(\"Empty heap. Nothing is changed. 
None is", "((x * 2 + 1 > lon) or (x *", "range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau", "= i tau.percDown(1) for x in range(1,len(L) + 1): for", "or father is bigger while i // 2 > 0", "tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers()))", "i // 2 > 0 and self.heapList[i] < self.heapList[i //", "all its elements i = 1 self.currentSize = len(alist) +", "(x * 2 + 1 == lon and HL[2*x] >=", "i tau.percDown(1) for x in range(1,len(L) + 1): for _", "None tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin() ==", "and HL[2*x+1] >= val)) def assert_goodheap(tau,lon): for x in range(1,lon):", "assert ((x * 2 + 1 > lon) or (x", "= [0] + alist # enlever le [:] while (i", "@settings(max_examples=400) def test_minChild(L,i): tau = BinHeap() assert tau.minChild(abs(i)) is None", "is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau = BinHeap()", "__init__(self): #initialise un tas binaire d'entiers avec un element 0", "None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def", "de K assert_goodheap(tau,tau.currentSize) x = [] while tau.currentSize > 1:x.append(tau.delMin())#on", "#creates a whole heap from a list, by percolating all", "assert tau.currentSize == len(L) + 1 assert sorted(tau.heapList) == sorted(L+[0])", "tau.heapList,\"\\n\") assert tau.delMin() == min(L) @given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau", "// 2 > 0 and self.heapList[i] < self.heapList[i // 2]:", "def minChild(self,i): if i 
* 2 >= self.currentSize or i", "1#taille de la liste heapList (invariant) def percUp(self,i): #upward percolation", "@given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau = BinHeap() tau.buildHeap(L) assert tau.currentSize", "heapList (invariant) def percUp(self,i): #upward percolation until 0 reached or", "range(1,len(L) + 1): for _ in range(len(L)): tau.percDown(x) #then we", "insert(self,k): #inserting a new value into the heap self.heapList.append(k) self.percUp(self.currentSize)", "@given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau = BinHeap() L += [10]", "bugs;\" # - envoyer le diff ou le dépôt git", "assert tau.heapList == gamma[:] tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList) assert", "tau.heapList[15] = 2 tau.percUp(15) print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert", "16 tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10]", "0 and self.heapList[i] < self.heapList[i // 2]: tmp = self.heapList[i", "1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau = BinHeap()", "Nothing is changed. 
None is returned.\") return self.currentSize = self.currentSize", "(invariant) def percUp(self,i): #upward percolation until 0 reached or father", "in range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i):", "self.heapList[i // 2]: tmp = self.heapList[i // 2] self.heapList[i //", "> self.heapList[mc]: tmp = self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc] =", "given, settings from hypothesis.strategies import integers, lists class BinHeap: #structure", "+ alist # enlever le [:] while (i < self.currentSize):", "delMin(self): try: rval = self.heapList[1] except IndexError: print(\"Empty heap. Nothing", "1 self.heapList = [0] + alist # enlever le [:]", "assert_isheaplist(x,val,lon,HL): assert ((x * 2 + 1 > lon) or", "its elements i = 1 self.currentSize = len(alist) + 1#", "def percDown(self,i): while (i * 2) < self.currentSize:#while I have", "moins) cinq erreurs. # Instructions: # - tester jusqu'à atteindre", "17 tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18", "def test_build(L): tau = BinHeap() tau.buildHeap(L) assert tau.currentSize == len(L)", "self.heapList.pop() self.percDown(1) return rval def buildHeap(self,alist): #creates a whole heap", "= BinHeap() assert tau.minChild(abs(i)) is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is", "self.percUp(i) i += 1 def assert_isheaplist(x,val,lon,HL): assert ((x * 2", "is returned.\") return if i * 2 + 1 >=", "avec delmin assert tau.delMin() is None x = [] tau.buildHeap(K)", ">= self.currentSize or i == 0: print(\"No Child. None is", "_ in range(len(L)): tau.percDown(x) #then we test that we got", "percUp(self,i): #upward percolation until 0 reached or father is bigger", "mc def minChild(self,i): if i * 2 >= self.currentSize or", "== 0: print(\"No Child. 
None is returned.\") return if i", "return if i * 2 + 1 >= self.currentSize: return", "buildHeap(self,alist): #creates a whole heap from a list, by percolating", "we got a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L):", "2 def insert(self,k): #inserting a new value into the heap", "self.currentSize = self.currentSize - 1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1)", "tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau = BinHeap()", "tau.currentSize == len(L) + 1 assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1)", "for k in K:tau.insert(k)#on rajoute les elements de K assert_goodheap(tau,tau.currentSize)", "lon) or (x * 2 + 1 == lon and", "assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau =", "assert tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau", "tau.buildHeap(L) #print(L) #print(\"sorted\",sorted(L),\"\\n\") #print(\"TAU \", tau.heapList,\"\\n\") assert tau.delMin() == min(L)", ">= val)) def assert_goodheap(tau,lon): for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def", "un tas binaire d'entiers avec un element 0 self.heapList =", "def test_percup(integer): gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize =", "diff ou le dépôt git par email.\"\"\" import hypothesis from", "+ 1 assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in", "in L:#teste si 1 suite d'insertion/ suppression maintient la structure", "or (x * 2 + 1 == lon and HL[2*x]", "new value into the heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize 
= self.currentSize", "+= 1 def assert_isheaplist(x,val,lon,HL): assert ((x * 2 + 1", "K)#verifie qu'on a bien le minimum avec delmin assert tau.delMin()", "val) or (HL[2*x] >= val and HL[2*x+1] >= val)) def", "= 2 tau.percUp(15) print(tau.heapList) assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize", "that we got a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def", "if i * 2 + 1 >= self.currentSize: return i", "> lon) or (x * 2 + 1 == lon", "tau.buildHeap(L)#tas construit avec L for k in K:tau.insert(k)#on rajoute les", "assert x == sorted(L + K)#verifie qu'on a bien le", "assert tau.heapList == [0] assert tau.currentSize == 1 @given(integers()) @settings(max_examples=100)", "import hypothesis from hypothesis import given, settings from hypothesis.strategies import", "x in range(1,len(L) + 1): # assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def", "@settings(max_examples=1000) def test_insert(L,i): tau = BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers())", "a child mc = self.minChild(i)#mc is the index of the", "self.currentSize or i == 0: print(\"No Child. 
None is returned.\")", "def buildHeap(self,alist): #creates a whole heap from a list, by", "// 2]: tmp = self.heapList[i // 2] self.heapList[i // 2]", "self.heapList[i] = self.heapList[mc] self.heapList[mc] = tmp i = mc def", "x == sorted(L + K)#verifie qu'on a bien le minimum", "# - envoyer le diff ou le dépôt git par", "I have a child mc = self.minChild(i)#mc is the index", "alist # enlever le [:] while (i < self.currentSize): self.percUp(i)", "= 16 tau.heapList = gamma[:] tau.percUp(15) assert tau.heapList == gamma[:]", ">= val and HL[2*x+1] >= val)) def assert_goodheap(tau,lon): for x", "== [0] assert tau.currentSize == 1 @given(integers()) @settings(max_examples=100) def test_percup(integer):", "# Ce fichier contient (au moins) cinq erreurs. # Instructions:", "= 1#taille de la liste heapList (invariant) def percUp(self,i): #upward", "- envoyer le diff ou le dépôt git par email.\"\"\"", "= BinHeap() assert tau.heapList == [0] assert tau.currentSize == 1", "1 def percDown(self,i): while (i * 2) < self.currentSize:#while I", "L for k in K:tau.insert(k)#on rajoute les elements de K", "> 1:x.append(tau.delMin())#on retire tous les elements assert x == sorted(L", "assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau = BinHeap() assert tau.heapList == [0]", "+= [10] tau = BinHeap() assert tau.delMin() is None tau.buildHeap(L)", "@given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L += [10] tau = BinHeap()", "= tmp i = mc def minChild(self,i): if i *", "== [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3] assert tau.currentsize == 16 tau.heapList.append(8) tau.currentsize = 17", "tau.currentsize = 17 tau.percUp(16) assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize", "> 0 and self.heapList[i] < self.heapList[i // 2]: tmp =", "sorted(L + K)#verifie qu'on a bien le minimum avec delmin", "tau.insert(i) assert_goodheap(tau,len(L)+1) 
@given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau = BinHeap() L", "1:x.append(tau.delMin())#on retire tous les elements assert x == sorted(L +", "* 2 + 1 def delMin(self): try: rval = self.heapList[1]", "k in K:tau.insert(k)#on rajoute les elements de K assert_goodheap(tau,tau.currentSize) x", "+ 1# + 1 self.heapList = [0] + alist #", "is None tau.buildHeap(2*L+[0,1]) assert tau.minChild(len(L)+1) is not None @given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None)", "@settings(max_examples=1000) def test_build(L): tau = BinHeap() tau.buildHeap(L) assert tau.currentSize ==", "tmp = self.heapList[i // 2] self.heapList[i // 2] = self.heapList[i]", "def test_init(): tau = BinHeap() assert tau.heapList == [0] assert", "0 self.heapList = [0] self.currentSize = 1#taille de la liste", "assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) @given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau = BinHeap() tau.buildHeap(L) tau.insert(i)", "K assert_goodheap(tau,tau.currentSize) x = [] while tau.currentSize > 1:x.append(tau.delMin())#on retire", "a well-ordered heap assert_goodheap(tau,len(L)+1) @given(lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_delmin(L): L +=", ">= val) or (HL[2*x] >= val and HL[2*x+1] >= val))", "Child. None is returned.\") return if i * 2 +", "self.heapList.append(k) self.percUp(self.currentSize) self.currentSize = self.currentSize + 1 def percDown(self,i): while", "father is bigger while i // 2 > 0 and", "a whole heap from a list, by percolating all its", "i == 0: print(\"No Child. 
None is returned.\") return if", "in K:tau.insert(k)#on rajoute les elements de K assert_goodheap(tau,tau.currentSize) x =", "18 tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4]", "assert_goodheap(tau,lon): for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau =", "tau = BinHeap() tau.buildHeap(L)#tas construit avec L for k in", "= self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc] = tmp i =", "= BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1) @given(lists(elements=integers()),integers()) @settings(max_examples=100) def test_percDown(L,i): tau", "for x in range(1,lon): assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList) def test_init(): tau = BinHeap()", "git par email.\"\"\" import hypothesis from hypothesis import given, settings", "x in range(1,len(L) + 1): for _ in range(len(L)): tau.percDown(x)", "- 1 self.heapList[1] = self.heapList[self.currentSize] self.heapList.pop() self.percDown(1) return rval def", "L += [10] tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1) for x", "le diff ou le dépôt git par email.\"\"\" import hypothesis", "self.heapList[i] > self.heapList[mc]: tmp = self.heapList[i] self.heapList[i] = self.heapList[mc] self.heapList[mc]", "try: rval = self.heapList[1] except IndexError: print(\"Empty heap. 
Nothing is", "tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1) for x in range(1,len(L) +", "tau.currentsize = 18 tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8]", "tas binaire d'entiers avec un element 0 self.heapList = [0]", "binaires d'entiers def __init__(self): #initialise un tas binaire d'entiers avec", "(i < self.currentSize): self.percUp(i) i += 1 def assert_isheaplist(x,val,lon,HL): assert", "for _ in range(len(L)): tau.percDown(x) #then we test that we", "heap self.heapList.append(k) self.percUp(self.currentSize) self.currentSize = self.currentSize + 1 def percDown(self,i):", "@given(lists(elements=integers()),lists(elements=integers())) @settings(max_examples=400,deadline=None) def test_general(L,K): tau = BinHeap() tau.buildHeap(L)#tas construit avec", "[0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3] tau = BinHeap() tau.currentsize = 16 tau.heapList = gamma[:]", "tau.currentsize == 16 tau.heapList.append(8) tau.currentsize = 17 tau.percUp(16) assert tau.heapList", "is None x = [] tau.buildHeap(K) for l in L:#teste", "= BinHeap() L += [10] tau.buildHeap(L) tau.heapList[1] = i tau.percDown(1)", "un element 0 self.heapList = [0] self.currentSize = 1#taille de", "@given(lists(elements=integers()),integers()) @settings(max_examples=1000) def test_insert(L,i): tau = BinHeap() tau.buildHeap(L) tau.insert(i) assert_goodheap(tau,len(L)+1)", "tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000) def test_build(L): tau = BinHeap() tau.buildHeap(L) assert", "self.percUp(self.currentSize) self.currentSize = self.currentSize + 1 def percDown(self,i): while (i", "assert tau.heapList[17] >= tau.heapList[8] assert tau.heapList[8] >= tau.heapList[4] @given(lists(elements=integers())) @settings(max_examples=1000)", "import integers, lists class BinHeap: #structure de tas binaires d'entiers", "tas binaires d'entiers def __init__(self): #initialise un tas binaire d'entiers", "< self.heapList[i*2+1]: return i * 2 else: 
return i *", "construit avec L for k in K:tau.insert(k)#on rajoute les elements", "if i * 2 >= self.currentSize or i == 0:", "return i * 2 else: return i * 2 +", "== [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17) assert tau.heapList[17] >=", "@given(lists(elements=integers()),integers()) @settings(max_examples=400) def test_minChild(L,i): tau = BinHeap() assert tau.minChild(abs(i)) is", "[0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10] tau.heapList.append(integer) tau.currentsize = 18 tau.percUp(17) assert tau.heapList[17] >= tau.heapList[8]", "1 assert sorted(tau.heapList) == sorted(L+[0]) assert_goodheap(tau,len(L)+1) #for x in range(1,len(L)" ]
[ "+ '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d,", "tend[i]) else: starting_index = [next(j for j, t in enumerate(model_tend)", "at each time step, the latter will compute the integrated", "backward compatibiity results = {} #save collated files: with TemporaryDirectory(prefix='snowglobes')", "time. Default to 30 time slices. tmin = snmodel.get_time()[0] tmax", "t1 = tend nbin = 1 times = 0.5*(tstart +", "output into a tarfile. Parameters ---------- model_path : str Input", "os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the timings up #default if", "in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s =", "f in flux_files),res)) # save result to file for re-use", "takes into account the effective mass of the detector as", "else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)", "time bin of model in requested interval temp_spectra = snmodel.get_transformed_spectra(", "energy for each interaction channel, integrated in a given time", "energy and time, for each interaction channel. Parameters ---------- SNOwGLoBESdir", "output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 *", "dt = tb-ta #first time bin of model in requested", "End of time interval to integrate over, or list of", "time slices. tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat", "column #return table with the original levels order t =", "t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column", "table with the original levels order t = t.unstack(levels) t", "``generate_fluence()``. 
detector_input : str Name of detector. If ``\"all\"``, will", "0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph smear_title", "---------- model_path : str Input file containing neutrino flux information", "for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event", "output files from SNOwGLoBES, collated files, and .png's made for", "'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T index = table.index.to_numpy() data", "or returns a data table. Parameters ---------- SNOwGLoBESdir : str", "functions for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected", "d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time series files in SNOwGLoBES", "possible values. d : int or float Distance to supernova", "what produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style", "time, for each interaction channel. Parameters ---------- SNOwGLoBESdir : str", "interaction channels. 
There are three basic steps to using SNOwGLoBES", "= tend[-1] nbin = len(tstart/u.s) except: t0 = tstart t1", "over, or list of end times of the time series", "= snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {} table = []", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])", "ntbins = int((tmax-tmin)/dt) else: dt = (tmax - tmin) /", "= {'header':header,'data':data} #optionally plot the results if skip_plots is False:", "'.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result) return result", "snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt = dt", "= \".dat\" model_file_root, _ = os.path.splitext(model_file) filename = model_file_root +", "in ['weighted','unweighted']: for s in ['smeared','unsmeared']: table = t[w][s] filename_base", "temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence =", "= [detector_input] result = {} #Extracts data from tarfile and", "np.linspace(0, 100, 501) * MeV # Loop over sampled times.", "= os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type +", "i in range(nbin): if nbin > 1: ta = tstart[i]", "cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to {cache_file}')", "over, or list of start times of the time series", "the first column the energy bins, in the remaining columns", "snewpy.flavor_transformation documentation for possible values. d : int or float", "for this snewpy run. 
Returns ------- dict Dictionary of data", "sets up lists of paths and fluxfilenames for later use", "neutrino flux files and configures and runs the supernova script", "three basic steps to using SNOwGLoBES from SNEWPY: * **Generating", "neutrino energy and time, for each interaction channel. Parameters ----------", "There are three basic steps to using SNOwGLoBES from SNEWPY:", "import re import tarfile from pathlib import Path from tempfile", "verbose output, e.g. for debugging. \"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if", "for each time bin and for each interaction channel. verbose", "simulation results to {cache_file}') np.save(cache_file, result) return result re_chan_label =", "of time slices. Returns ------- str Path of compressed .tar", "detector(s). These event rates are given as a function of", "TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in", "Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins of model in", "If ``\"all\"``, will use all detectors supported by SNOwGLoBES. skip_plots:", "input file. Matches the name of the corresponding class in", "subsample the times in a supernova model, produce energy tables", "in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model", "in tar archive that gives information on parameters output =", "as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input =", "slices. Returns ------- str Path of compressed .tar file with", "dt = tb-ta else: ta = tstart tb = tend", "files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the results for", "> tstart)] ending_index = [next(j for j, t in enumerate(model_tend)", "plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) #", "# Subsample the model time. 
Default to 30 time slices.", "as f: f.write(table.to_string(float_format='%23.15g')) #format the results for the output header", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table", "tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if len(tstart/u.s) >", "output, e.g. for debugging. remove_generated_files: bool Remove the output files", "range(len(tstart)): starting_index[i] = next(j for j, t in enumerate(model_tend) if", "a compressed .tar file containing all individual input files. *", "os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as input", "table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot the", "model_times*1.0 model_tend = model_times*1.0 model_tstart[0] = model_times[0] for i in", "SNOwGLoBES:** There are two ways to do this, either generate", "in various detectors supported by SNOwGLoBES. It takes into account", "a time series or a fluence file. This is done", "ignored if ``deltat`` is also given. deltat : astropy.Quantity or", "if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if", "NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel =", "output_filename is not None: tfname = output_filename+'.tar.bz2' else: model_file_root, _", "if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'),", "astropy.Quantity or None Length of time slices. Returns ------- str", "time bin), or in a snapshot in time. 
* **Collating", "= os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample the model time.", "#plotting the events from given table flux,det,weighted,smeared = params for", "NuTau aNuE aNuMu aNuTau') # Generate energy + number flux", "detected as a function of energy for each interaction channel,", "plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events')", "flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor:", "# Loop over sampled times. for i in range(nbin): if", "flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of", "plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read the results", "Path of compressed .tar file produced e.g. by ``generate_time_series()`` or", "table and output to file in tar archive. output =", "dict to associate the transformation name with its class. flavor_transformation_dict", ": str Name of detector. If ``\"all\"``, will use all", "tb = tend[i] t = times[i] dt = tb-ta else:", "else: filename = output_filename+extension else: model_file_root, _ = os.path.splitext(model_file) #", "* from snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES", "Path(tempdir) for det in tables: results[det] = {} for flux,t", "time bin and for each interaction channel. verbose : bool", "two ways to do this, either generate a time series", "output_filename : str or None Name of output file. 
If", "the plot of the energy distribution for each time bin", "model_times[ending_index[i]], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]])", "f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c in mapp: return", "as a function of energy for each interaction channel, integrated", "+ '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname),", "= snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor]", "0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV))", "+ number flux table. for j, E in enumerate(energy): for", "patterns.items(): #get channels which contain `like` t_sel = t.filter(like=pattern) #sum", "Parameters ---------- model_path : str Input file containing neutrino flux", "detected event rates from a given input supernova neutrino flux.", "#rearrange the table to have only channel column levels =", "t_sel = t.filter(like=pattern) #sum over them and save to a", "tempdir/f'Collated_{filename_base}.dat' #save results to text files with open(filename,'w') as f:", "with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates from", "dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1]) # Generate output. if", "model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. Use dict", "is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight')", "1.60218e-6 * u.erg energy = np.linspace(0, 100, 501) * MeV", "file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. 
detector_input : str", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s =", "the sum of them. \"\"\" import io import logging import", "len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input:", "* dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s =", "return mapp[c] else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\",", "them. \"\"\" import io import logging import os import re", "'.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension)", "snmodel.get_time()[-1] if deltat is not None: dt = deltat ntbins", "to a separate column t_agg = t_sel.sum(axis='columns') #drop processed channels", "model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d)", "+ tend) model_times = snmodel.get_time() model_tstart = model_times*1.0 model_tend =", "Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy", "flavor transformation. See snewpy.flavor_transformation documentation for possible values. d :", "kpc. output_filename : str or None Name of output file.", "or list of start times of the time series bins.", "compress the output into a tarfile. 
Parameters ---------- model_path :", "#Make a tarfile with the condensed data files and plots", "osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {} table =", "j, t in enumerate(model_tend) if t > tstart)] ending_index =", "= output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as tar:", "Parameters ---------- SNOwGLoBESdir : str Path to directory where SNOwGLoBES", "model_times = snmodel.get_time() model_tstart = model_times*1.0 model_tend = model_times*1.0 model_tstart[0]", "# Generate output. if output_filename is not None: tfname =", "Path to directory where SNOwGLoBES is installed. tarball_path : str", "TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos", "detector_input=\"all\", verbose=False): \"\"\"Takes as input the neutrino flux files and", "interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor in", "tarfile. Parameters ---------- model_path : str Input file containing neutrino", "ntbins, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info,", "= len(tstart/u.s) except: t0 = tstart t1 = tend nbin", "else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False,", "SNEWPY: * **Generating input files for SNOwGLoBES:** There are two", "osc_fluence = {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV", "e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of", "One table per time bin; each table contains in the", "the detector. \"\"\" def aggregate_channels(table, **patterns): #rearrange the table to", "done taking as input the supernova simulation model. The first", "all individual input files. 
* **Running SNOwGLoBES:** This step convolves", "= 'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T index = table.index.to_numpy()", "'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(),", "#first time bin of model in requested interval osc_spectra =", "def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as input the neutrino", "plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph", "det in tables: results[det] = {} for flux,t in tables[det].items():", "(tmax - tmin) / (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s", "cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from {cache_file}') tables", "supernova model. model_type : str Format of input file. Matches", "column the energy bins, in the remaining columns the number", "mapp[c] else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False,", "a given time window (or time bin), or in a", "= tb-ta else: ta = tstart tb = tend t", "made for this snewpy run. Returns ------- dict Dictionary of", "d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates", "the condensed data files and plots output_name = Path(tarball_path).stem output_name", "extension = \".dat\" model_file_root, _ = os.path.splitext(model_file) filename = model_file_root", "or the sum of them. 
\"\"\" import io import logging", "= list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result =", "['weighted','unweighted']: for s in ['smeared','unsmeared']: table = t[w][s] filename_base =", "filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0,", "= getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. Use dict to", "astropy.Quantity or None Start of time interval to integrate over,", "a function of the neutrino energy and time, for each", "and output to file in tar archive. output = '\\n'.join(table).encode('ascii')", "tend) model_times = snmodel.get_time() model_tstart = model_times*1.0 model_tend = model_times*1.0", "for backward compatibiity results = {} #save collated files: with", "detectors supported by SNOwGLoBES. verbose : bool Whether to generate", "= tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file,", "step convolves the fluence generated in the previous step with", "as plt import numpy as np from astropy import units", "channel. verbose : bool Whether to generate verbose output, e.g.", "and save to a separate column t_agg = t_sel.sum(axis='columns') #drop", "len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\",", "times dt = tb-ta #first time bin of model in", "energy, flavor_transformation) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec", "flavor transformation. Use dict to associate the transformation name with", "#set the timings up #default if inputs are None: full", "for debugging. 
\"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all':", "parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6", "Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\\\'+flv", "= tempdir/f'Collated_{filename_base}.dat' #save results to text files with open(filename,'w') as", "import matplotlib as mpl import matplotlib.pyplot as plt import numpy", "of time interval to integrate over, or list of end", "output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as tar: for", "model_times*1.0 model_tstart[0] = model_times[0] for i in range(1, len(model_times), 1):", "``\"all\"``, will use all detectors supported by SNOwGLoBES. verbose :", "configures and runs the supernova script inside SNOwGLoBES, which outputs", "**Running SNOwGLoBES:** This step convolves the fluence generated in the", "number of events detected as a function of energy for", "Use dict to associate the transformation name with its class.", "= {} #Extracts data from tarfile and sets up lists", "for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin", "input files for SNOwGLoBES:** There are two ways to do", "model. The first will evaluate the neutrino flux at each", "model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d)", "cross-sections for the interaction channels happening in various detectors supported", "output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if", "of end times of the time series bins. 
Returns -------", "j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation)", "Whether to generate verbose output, e.g. for debugging. \"\"\" sng", "= Path(tempdir) for det in tables: results[det] = {} for", "tend[i] t = times[i] dt = tb-ta else: ta =", "transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy =", "-*- coding: utf-8 -*- \"\"\"The ``snewpy.snowglobes`` module contains functions for", "snapshot in time. * **Collating SNOwGLoBES outputs:** This step puts", "tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def", "# Generate energy + number flux table. for j, E", "= table.stack(levels) for name,pattern in patterns.items(): #get channels which contain", "= '\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root, _ = os.path.splitext(model_file) filename", "to generate verbose output, e.g. for debugging. \"\"\" sng =", "remove_generated_files: bool Remove the output files from SNOwGLoBES, collated files,", "deltat ntbins = int((tmax-tmin)/dt) else: dt = (tmax - tmin)", "[next(j for j, t in enumerate(model_tend) if t >= tend)]", "from a given input supernova neutrino flux. 
It supports many", "tb-ta #first time bin of model in requested interval osc_spectra", "each interaction channel, integrated in a given time window (or", "= 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin", "flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False)", "do_plot(table, params): #plotting the events from given table flux,det,weighted,smeared =", "not None: dt = deltat ntbins = int((tmax-tmin)/dt) else: dt", "snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if len(tstart/u.s) > 0: t0", "det) result[det]=dict(zip((f.stem for f in flux_files),res)) # save result to", "results to {cache_file}') np.save(cache_file, result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)')", "Earth for this timebin in neutrinos per cm^2'.format(t, dt)) table.append('#", "u.erg energy = np.linspace(0, 100, 501) * MeV # 1MeV", "for j, t in enumerate(model_tend) if t >= tend)] #", "transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info =", "in table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10)", "snewpy.models from snewpy.flavor_transformation import * from snewpy.neutrino import Flavor, MassHierarchy", "Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as tar: for file in tempdir.iterdir():", "from given table flux,det,weighted,smeared = params for c in table.columns:", "channel in the detector. \"\"\" def aggregate_channels(table, **patterns): #rearrange the", "\"\"\"Generate time series files in SNOwGLoBES format. This version will", "output = '\\n'.join(table).encode('ascii') extension = \".dat\" if output_filename is not", "Generate energy + number flux table. 
for j, E in", "if nbin > 1: ta = tstart[i] tb = tend[i]", "number of events for each interaction channel in the detector.", "or in a snapshot in time. * **Collating SNOwGLoBES outputs:**", "detector as well as a smearing matrix describing the energy-dependent", "supernova in kpc. output_filename : str or None Name of", "'\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy", "with tarfile.open(output_path, \"w:gz\") as tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name)", "= model_class(model_path) # Subsample the model time. Default to 30", "table. Parameters ---------- SNOwGLoBESdir : str Path to directory where", "tfname), 'w:bz2') as tf: #creates file in tar archive that", "is also given. deltat : astropy.Quantity or None Length of", "flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2", "'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if", "run. Returns ------- dict Dictionary of data tables: One table", "verbose=False): \"\"\"Takes as input the neutrino flux files and configures", ":py:mod:`snewpy.models`. transformation_type : str Name of flavor transformation. See snewpy.flavor_transformation", "Subsample the model time. Default to 30 time slices. tmin", "t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table, params):", "= aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for s in ['smeared','unsmeared']:", "{} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at", "table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log')", "on input file name. 
ntbins : int Number of time", "column t_agg = t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg", "supernova simulation model. The first will evaluate the neutrino flux", "bool Whether to generate verbose output, e.g. for debugging. remove_generated_files:", "files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir) for det", "np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1]) # Generate", "rates are given as a function of the neutrino energy", "events for each interaction channel in the detector. \"\"\" def", "int or float Distance to supernova in kpc. output_filename :", "archive that gives information on parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii')", "data = table.to_numpy().T index = table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name]", "The first will evaluate the neutrino flux at each time", "'{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s,", "contains functions for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate", "of the detector as well as a smearing matrix describing", "np.save(cache_file, result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp", "cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')", "series bins. Returns ------- str Path of compressed .tar file", "= model_class(model_path) #set the timings up #default if inputs are", "of the time series bins. Returns ------- str Path of", ": str Format of input file. 
Matches the name of", "this, either generate a time series or a fluence file.", "logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30,", "# Loop over sampled times. for i, t in enumerate(times):", "over them and save to a separate column t_agg =", "the times in a supernova model, produce energy tables expected", "osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV /", "temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model in requested interval temp_spectra", "'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file in", "sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input = list(sng.detectors)", "window of the model if tstart is None: tstart =", "of start times of the time series bins. tend :", "model_times[-1] if nbin > 1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index", "produce energy tables expected by SNOwGLoBES, and compress the output", "starting_index = [next(j for j, t in enumerate(model_tend) if t", "100, 501) * MeV # Loop over sampled times. for", "containing neutrino flux information from supernova model. model_type : str", "# Choose flavor transformation. Use dict to associate the transformation", "data. \"\"\" model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation.", "output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as tar: for file", "of) detector(s). These event rates are given as a function", "each time step, the latter will compute the integrated neutrino", "output_filename + 'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip", "enumerate(model_tend) if t >= tend)] # Generate output. 
if output_filename", "is not None: tfname = output_filename + 'kpc.tar.bz2' else: model_file_root,", "len(tstart/u.s) except: t0 = tstart t1 = tend nbin =", "for the interaction channels happening in various detectors supported by", "flux table. for j, E in enumerate(energy): for flavor in", "plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a", "else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read the results from", "\".dat\" model_file_root, _ = os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1)", "to file for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] +", "rates expected for a given (set of) detector(s). These event", "bins, in the remaining columns the number of events for", "and configures and runs the supernova script inside SNOwGLoBES, which", "(set of) detector(s). These event rates are given as a", "name. ntbins : int Number of time slices. 
Will be", "{weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else:", "= flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set", "similar to what produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict", "= [next(j for j, t in enumerate(model_tend) if t >", "+ 'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension", "------- str Path of compressed .tar file with neutrino flux", "generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time series", "Returns ------- str Path of compressed .tar file with neutrino", "t_agg = t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill", "30 time slices. tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if", "f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results to text files with", "``None``, will be based on input file name. ntbins :", "os import re import tarfile from pathlib import Path from", "= snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] +=", "= np.load(cache_file, allow_pickle=True).tolist() #This output is similar to what produced", "int Number of time slices. 
Will be ignored if ``deltat``", "0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph smear_title =", "= m.groups() if flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar s =", "model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins,", "result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse", "\"w:gz\") as tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created archive:", "neutrino time distribution, for each reaction channel or the sum", "= output_filename+extension else: model_file_root, _ = os.path.splitext(model_file) # strip extension", "'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events')", "import TemporaryDirectory import matplotlib as mpl import matplotlib.pyplot as plt", "= snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat is not None:", "as tf: #creates file in tar archive that gives information", "import tqdm import snewpy.models from snewpy.flavor_transformation import * from snewpy.neutrino", "= SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O')", "step puts together all the interaction channels and time bins", "t >= tend)] # Generate output. if output_filename is not", "archive. output = '\\n'.join(table).encode('ascii') extension = \".dat\" if output_filename is", "ending_index = [next(j for j, t in enumerate(model_tend) if t", "module contains functions for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can", "osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux", "on input file name. 
tstart : astropy.Quantity or None Start", "time bins evaluated by SNOwGLoBES in a single file (for", "\"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input =", "and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with", "from snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type,", "1: ta = tstart[i] tb = tend[i] t = times[i]", "SNOwGLoBES:** This step convolves the fluence generated in the previous", "inside SNOwGLoBES, which outputs calculated event rates expected for a", "channel column levels = list(table.columns.names) levels.remove('channel') t = table.stack(levels) for", "#tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results, for backward", "file for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'", "'.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2')", "= np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot the results if", "supernova model, produce energy tables expected by SNOwGLoBES, and compress", "SNOwGLoBESdir : str Path to directory where SNOwGLoBES is installed.", "SNOwGLoBES in a single file (for each detector and for", "NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation", "d, output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence files in SNOwGLoBES format.", "return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True):", "NuE NuMu NuTau aNuE aNuMu aNuTau') 
# Generate energy +", "in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence = {} table =", "of the neutrino energy and time, for each interaction channel.", "series or a fluence file. This is done taking as", "tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result)", "the time bin. The result is a compressed .tar file", "tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem", "present) tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin,", "MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s", "the timings up #default if inputs are None: full time", "of data tables: One table per time bin; each table", "this snewpy run. Returns ------- dict Dictionary of data tables:", "import SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d,", "# Encode energy/flux table and output to file in tar", "# save result to file for re-use in collate() cache_file", "not None: if nbin > 1: filename = output_filename+\"_\"+str(i)+extension else:", "ntbins : int Number of time slices. 
Will be ignored", "#Extracts data from tarfile and sets up lists of paths", "TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type]", "def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence", "separate column t_agg = t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True)", "return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent',", "#default if inputs are None: full time window of the", "if t > tstart)] ending_index = [next(j for j, t", "= osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)", "osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s", "function of the neutrino energy and time, for each interaction", "from supernova model. 
model_type : str Format of input file.", "= {} #save collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir", "in range(nbin): if nbin > 1: ta = tstart[i] tb", "osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s", "table.to_numpy().T index = table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data}", "up #default if inputs are None: full time window of", "bin; each table contains in the first column the energy", "skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files and generates plots", "> tstart[i]) ending_index[i] = next(j for j, t in enumerate(model_tend)", "{} #Extracts data from tarfile and sets up lists of", "in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau", "for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created archive: {output_path}') return results", "of input file. Matches the name of the corresponding class", "output_filename is not None: if nbin > 1: filename =", "tarfile and sets up lists of paths and fluxfilenames for", "NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence':", "index = table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally", "flux data. \"\"\" model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor", "= f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c in mapp:", "tb = tend t = times dt = tb-ta #first", "Input file containing neutrino flux information from supernova model. 
model_type", "= simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results, for backward compatibiity", "j, t in enumerate(model_tend) if t >= tend[i]) else: starting_index", "collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir) for", "detectors, detector materials and interaction channels. There are three basic", "single file (for each detector and for each time bin).", "#dict for old-style results, for backward compatibiity results = {}", "SNOwGLoBES format. This version will subsample the times in a", "extension (if present) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type", "model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the timings up", "import units as u from tqdm.auto import tqdm import snewpy.models", "of detector. If ``\"all\"``, will use all detectors supported by", "model_class(model_path) # Subsample the model time. Default to 30 time", "result[det]=dict(zip((f.stem for f in flux_files),res)) # save result to file", "in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt", "with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the results for the", "input the neutrino flux files and configures and runs the", "**Generating input files for SNOwGLoBES:** There are two ways to", "up lists of paths and fluxfilenames for later use with", "= model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s,", "table contains in the first column the energy bins, in", "series bins. 
tend : astropy.Quantity or None End of time", "1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1]", "from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from", "tfname) def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None): \"\"\"Generate", "astropy.Quantity or None End of time interval to integrate over,", "with the condensed data files and plots output_name = Path(tarball_path).stem", "utf-8 -*- \"\"\"The ``snewpy.snowglobes`` module contains functions for interacting with", "the time series bins. tend : astropy.Quantity or None End", "SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif", "s if c in mapp: return mapp[c] else: return re_chan_label.sub(gen_label,", "by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results, for", "the neutrino energy and time, for each interaction channel. Parameters", "== 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input =", "(GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read the", "given. deltat : astropy.Quantity or None Length of time slices.", "tmax = snmodel.get_time()[-1] if deltat is not None: dt =", "associate the transformation name with its class. flavor_transformation_dict = {'NoTransformation':", "Number of time slices. Will be ignored if ``deltat`` is", "order t = t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return t", "file. This is done taking as input the supernova simulation", "table. for j, E in enumerate(energy): for flavor in Flavor:", "If ``\"all\"``, will use all detectors supported by SNOwGLoBES. 
verbose", "results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables", "Path of compressed .tar file with neutrino flux data. \"\"\"", "if detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str):", "leave=False) for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f", "os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample the model time. Default", "generated in the previous step with the cross-sections for the", "# formats complete graph smear_title = 'Interaction' if smeared=='unsmeared' else", "'.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension)", "documentation for possible values. d : int or float Distance", "table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save", "for this timebin in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV)", "0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5),", "the energy distribution for each time bin and for each", "tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from {cache_file}') tables = np.load(cache_file,", "= snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt =", "fluence generated in the previous step with the cross-sections for", "NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path))", "input file name. 
ntbins : int Number of time slices.", "with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file in tar", "name. tstart : astropy.Quantity or None Start of time interval", "columns the number of events for each interaction channel in", "a supernova model, produce energy tables expected by SNOwGLoBES, and", "{'header':header,'data':data} #optionally plot the results if skip_plots is False: plt.figure(dpi=300)", "in SNOwGLoBES format. This version will subsample the times in", "compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input", "else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected", "100, 501) * MeV # 1MeV # Loop over sampled", "def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time", "t1, nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as", "matplotlib as mpl import matplotlib.pyplot as plt import numpy as", "ending_index = np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)): starting_index[i] =", "output. if output_filename is not None: tfname = output_filename +", "time series bins. Returns ------- str Path of compressed .tar", "SNOwGLoBES output files and generates plots or returns a data", "SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None,", "in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for", "except: t0 = tstart t1 = tend nbin = 1", "is not None: tfname = output_filename+'.tar.bz2' else: model_file_root, _ =", "energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt = dt else:", "a smearing matrix describing the energy-dependent detection efficiency. 
The output", "+= temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model in requested interval", "from SNOwGLoBES, collated files, and .png's made for this snewpy", "astropy import units as u from tqdm.auto import tqdm import", "/ (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s,", "per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu", "loc='best', borderaxespad=0) # formats complete graph smear_title = 'Interaction' if", "time interval to integrate over, or list of end times", "over sampled times. for i in range(nbin): if nbin >", "def do_plot(table, params): #plotting the events from given table flux,det,weighted,smeared", "time step, the latter will compute the integrated neutrino flux", "format. This version will subsample the times in a supernova", "< model_tend[starting_index[i]]-ta: dt = dt else: for flavor in Flavor:", "E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau') # Generate energy", "in the time bin. The result is a compressed .tar", "tstart=None, tend=None): \"\"\"Generate fluence files in SNOwGLoBES format. This version", "for flavor in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence = {}", "start times of the time series bins. tend : astropy.Quantity", "a given input supernova neutrino flux. It supports many different", "and runs the supernova script inside SNOwGLoBES, which outputs calculated", "SNOwGLoBES. 
skip_plots: bool If False, it gives as output the", "t.filter(like=pattern) #sum over them and save to a separate column", "is not None: dt = deltat ntbins = int((tmax-tmin)/dt) else:", "= logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None):", "transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info =", "logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result) return result re_chan_label", "inputs are None: full time window of the model if", "file in tar archive. output = '\\n'.join(table).encode('ascii') extension = \".dat\"", "\".dat\" if output_filename is not None: if nbin > 1:", "flux. It supports many different neutrino detectors, detector materials and", "simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as input the neutrino flux", "given as a function of the neutrino energy and time,", "in enumerate(model_tend) if t >= tend)] # Generate output. if", "the original levels order t = t.unstack(levels) t = t.reorder_levels(table.columns.names,", "and .png's made for this snewpy run. Returns ------- dict", "channels which contain `like` t_sel = t.filter(like=pattern) #sum over them", "range(nbin): if nbin > 1: ta = tstart[i] tb =", "for flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w in", "tar archive. output = '\\n'.join(table).encode('ascii') extension = \".dat\" if output_filename", "of the model if tstart is None: tstart = snmodel.get_time()[0]", "into a tarfile. Parameters ---------- model_path : str Input file", "(for each detector and for each time bin). The output", "= tend[i] t = times[i] dt = tb-ta else: ta", "* MeV # Loop over sampled times. for i in", "file (for each detector and for each time bin). 
The", "os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None):", "if t >= tend[i]) else: starting_index = [next(j for j,", "all detectors supported by SNOwGLoBES. verbose : bool Whether to", "(or time bin), or in a snapshot in time. *", "np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot the results if skip_plots", "detected neutrino energy spectrum and neutrino time distribution, for each", "model_file_root, _ = os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1) +", "tstart t1 = tend nbin = 1 times = 0.5*(tstart", "be ignored if ``deltat`` is also given. deltat : astropy.Quantity", "= output_filename + 'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) #", "by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of detector.", "the detected neutrino energy spectrum and neutrino time distribution, for", "do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile", "= output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension else: model_file_root, _ =", "data from tarfile and sets up lists of paths and", "aNuTau') # Generate energy + number flux table. for j,", "name with its class. flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL),", "transformation_type, d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time series files in", "enumerate(model_tend) if t > tstart)] ending_index = [next(j for j,", "The output tables allow to build the detected neutrino energy", "event rates expected for a given (set of) detector(s). 
These", "the effective mass of the detector as well as a", "in a given time window (or time bin), or in", "it gives as output the plot of the energy distribution", "\"\"\"Collates SNOwGLoBES output files and generates plots or returns a", "and time bins evaluated by SNOwGLoBES in a single file", "matrix describing the energy-dependent detection efficiency. The output gives the", "files. * **Running SNOwGLoBES:** This step convolves the fluence generated", "if inputs are None: full time window of the model", "plt import numpy as np from astropy import units as", "_ = os.path.splitext(model_file) # strip extension (if present) filename =", "info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir,", "times in a supernova model, produce energy tables expected by", "timings up #default if inputs are None: full time window", "detector. If ``\"all\"``, will use all detectors supported by SNOwGLoBES.", "results[det] = {} for flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e')", "None End of time interval to integrate over, or list", "will evaluate the neutrino flux at each time step, the", "flux (fluence) in the time bin. The result is a", "Remove the output files from SNOwGLoBES, collated files, and .png's", "data files and plots output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed'", "= np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)): starting_index[i] = next(j", "sampled times. for i, t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t,", "dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3", "from snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger", "supported by SNOwGLoBES. 
It takes into account the effective mass", "SNOwGLoBES outputs:** This step puts together all the interaction channels", "for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in", "= model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin > 1: starting_index", "\"\"\"Takes as input the neutrino flux files and configures and", "filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile with", "+ tedges[:-1]) # Generate output. if output_filename is not None:", "t > tstart[i]) ending_index[i] = next(j for j, t in", "in a snapshot in time. * **Collating SNOwGLoBES outputs:** This", "of paths and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as", "tables allow to build the detected neutrino energy spectrum and", "io import logging import os import re import tarfile from", "nbin = len(tstart/u.s) except: t0 = tstart t1 = tend", "flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample", "ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file", "to file in tar archive. output = '\\n'.join(table).encode('ascii') extension =", "None: tfname = output_filename + 'kpc.tar.bz2' else: model_file_root, _ =", "for w in ['weighted','unweighted']: for s in ['smeared','unsmeared']: table =", "the supernova script inside SNOwGLoBES, which outputs calculated event rates", "+ '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) +", "given (set of) detector(s). These event rates are given as", "at Earth for this timebin in neutrinos per cm^2'.format(t, dt))", "or a fluence file. 
This is done taking as input", "describing the energy-dependent detection efficiency. The output gives the number", "model_type, transformation_type, d, output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence files in", "flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the", ">= tend[i]) else: starting_index = [next(j for j, t in", "* 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 *", "individual input files. * **Running SNOwGLoBES:** This step convolves the", "tend = snmodel.get_time()[-1] try: if len(tstart/u.s) > 0: t0 =", "present) tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0,", "= t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table, params): #plotting the", "flavor_transformation) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s", "in requested interval for j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra", "mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res", "Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.", "t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results to", "for debugging. 
remove_generated_files: bool Remove the output files from SNOwGLoBES,", "dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)): starting_index[i]", "`SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates from a given", "[] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this", "0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin >", "logging import os import re import tarfile from pathlib import", "'+' '.join(list(table.columns)) data = table.to_numpy().T index = table.index.to_numpy() data =", "as input the supernova simulation model. The first will evaluate", ": int or float Distance to supernova in kpc. output_filename", "for i, t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation)", "'+res return s if c in mapp: return mapp[c] else:", "output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time series files in SNOwGLoBES format.", ": int Number of time slices. Will be ignored if", "range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor", "return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as", "in :py:mod:`snewpy.models`. transformation_type : str Name of flavor transformation. See", "version will subsample the times in a supernova model, produce", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s =", "tstart[i] tb = tend[i] t = times[i] dt = tb-ta", "a tarfile with the condensed data files and plots output_name", "each interaction channel. 
verbose : bool Whether to generate verbose", "model if tstart is None: tstart = snmodel.get_time()[0] tend =", "str Path of compressed .tar file with neutrino flux data.", "<https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates from a given input", "tfname = output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip", "interaction channels and time bins evaluated by SNOwGLoBES in a", "= (tmax - tmin) / (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s,", "nbin = 1 times = 0.5*(tstart + tend) model_times =", "flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']:", "by SNOwGLoBES in a single file (for each detector and", "detectors supported by SNOwGLoBES. It takes into account the effective", "m.groups() if flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$", "skip_plots: bool If False, it gives as output the plot", "use all detectors supported by SNOwGLoBES. skip_plots: bool If False,", "= f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results to text files", "of them. \"\"\" import io import logging import os import", "u.erg energy = np.linspace(0, 100, 501) * MeV # Loop", "neutrino flux data. \"\"\" model_class = getattr(snewpy.models.ccsn, model_type) # Choose", "fluence files in SNOwGLoBES format. 
This version will subsample the", "det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res))", "output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension else: model_file_root, _ = os.path.splitext(model_file)", "time bin; each table contains in the first column the", "'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel", "f: f.write(table.to_string(float_format='%23.15g')) #format the results for the output header =", "The output gives the number of events detected as a", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])", "len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] =", "= np.linspace(0, 100, 501) * MeV # Loop over sampled", "dict Dictionary of data tables: One table per time bin;", "table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5,", "= times dt = tb-ta #first time bin of model", "output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with", "for each interaction channel in the detector. \"\"\" def aggregate_channels(table,", "a separate column t_agg = t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns,", "str Path to directory where SNOwGLoBES is installed. 
tarball_path :", "\\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info = tarfile.TarInfo(name=filename) info.size", "E in enumerate(energy): for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j]", "1MeV # Loop over sampled times. for i, t in", "output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as", "energy + number flux table. for j, E in enumerate(energy):", "= np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for i in", "detectors supported by SNOwGLoBES. skip_plots: bool If False, it gives", "SNOwGLoBES, which outputs calculated event rates expected for a given", "result is a compressed .tar file containing all individual input", "use all detectors supported by SNOwGLoBES. verbose : bool Whether", "ending_index[i] = next(j for j, t in enumerate(model_tend) if t", "given input supernova neutrino flux. It supports many different neutrino", "step, the latter will compute the integrated neutrino flux (fluence)", "str Input file containing neutrino flux information from supernova model.", "integrate over, or list of end times of the time", "re import tarfile from pathlib import Path from tempfile import", "the energy bins, in the remaining columns the number of", "bbox_inches='tight') #Make a tarfile with the condensed data files and", "to integrate over, or list of start times of the", ".tar file containing all individual input files. * **Running SNOwGLoBES:**", "information on parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV", "Name of detector. If ``\"all\"``, will use all detectors supported", "Will be ignored if ``deltat`` is also given. 
deltat :", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table and output", "= tstart[0] t1 = tend[-1] nbin = len(tstart/u.s) except: t0", "time slices. Will be ignored if ``deltat`` is also given.", "corresponding class in :py:mod:`snewpy.models`. transformation_type : str Name of flavor", "of the energy distribution for each time bin and for", "'+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c in mapp: return mapp[c]", "bin). The output tables allow to build the detected neutrino", "complete graph smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux}", "elif isinstance(detector_input,str): detector_input = [detector_input] result = {} #Extracts data", "directory where SNOwGLoBES is installed. tarball_path : str Path of", "file. If ``None``, will be based on input file name.", "import logging import os import re import tarfile from pathlib", "= tb-ta #first time bin of model in requested interval", "files and configures and runs the supernova script inside SNOwGLoBES,", "deltat is not None: dt = deltat ntbins = int((tmax-tmin)/dt)", "SNOwGLoBES from SNEWPY: * **Generating input files for SNOwGLoBES:** There", "convolves the fluence generated in the previous step with the", "t = aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for s in", "integrated neutrino flux (fluence) in the time bin. 
The result", "the fluence generated in the previous step with the cross-sections", "levels order t = t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return", "re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates", "tmax/u.s, ntbins, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output)", "in ['smeared','unsmeared']: table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename =", "import os import re import tarfile from pathlib import Path", "the energy-dependent detection efficiency. The output gives the number of", "params): #plotting the events from given table flux,det,weighted,smeared = params", "= os.path.splitext(model_file) # strip extension (if present) tfname = model_file_root", "0.5*(tstart + tend) model_times = snmodel.get_time() model_tstart = model_times*1.0 model_tend", ": str Path to directory where SNOwGLoBES is installed. tarball_path", "of model in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation)", "* MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s", "installed. tarball_path : str Path of compressed .tar file produced", "text files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the results", "tstart[i]) ending_index[i] = next(j for j, t in enumerate(model_tend) if", "to build the detected neutrino energy spectrum and neutrino time", "for i in range(len(tstart)): starting_index[i] = next(j for j, t", "of the corresponding class in :py:mod:`snewpy.models`. transformation_type : str Name", "and generates plots or returns a data table. 
Parameters ----------", "aggregate_channels(table, **patterns): #rearrange the table to have only channel column", "compatibiity results = {} #save collated files: with TemporaryDirectory(prefix='snowglobes') as", "t = times dt = tb-ta #first time bin of", "with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0:", "for s in ['smeared','unsmeared']: table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}'", "flux files and configures and runs the supernova script inside", "(if present) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type +", "If ``None``, will be based on input file name. ntbins", "(ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] +", "Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV", "based on input file name. tstart : astropy.Quantity or None", "for name,pattern in patterns.items(): #get channels which contain `like` t_sel", "1: filename = output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension else: model_file_root,", "bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if", "them and save to a separate column t_agg = t_sel.sum(axis='columns')", "strip extension (if present) tfname = model_file_root + '.' +", "aNuMu aNuTau') # Generate energy + number flux table. for", "'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir,", "times. for i, t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy,", "table per time bin; each table contains in the first", "supported by SNOwGLoBES. 
skip_plots: bool If False, it gives as", "+ transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info", "on parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV =", "model_tend = model_times*1.0 model_tstart[0] = model_times[0] for i in range(1,", "e.g. for debugging. \"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input ==", "_ = os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type", "501) * MeV # 1MeV # Loop over sampled times.", "= deltat ntbins = int((tmax-tmin)/dt) else: dt = (tmax -", "``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_", "\\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info = tarfile.TarInfo(name=filename) info.size", "# 1MeV # Loop over sampled times. for i, t", "in time. * **Collating SNOwGLoBES outputs:** This step puts together", "result to file for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')]", "to do this, either generate a time series or a", "# -*- coding: utf-8 -*- \"\"\"The ``snewpy.snowglobes`` module contains functions", "in mapp: return mapp[c] else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir,", "float Distance to supernova in kpc. 
output_filename : str or", "t = table.stack(levels) for name,pattern in patterns.items(): #get channels which", "tend nbin = 1 times = 0.5*(tstart + tend) model_times", "the model if tstart is None: tstart = snmodel.get_time()[0] tend", "Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model in", "tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1])", "nbin, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info,", "different neutrino detectors, detector materials and interaction channels. There are", "flux at each time step, the latter will compute the", "TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir) for det in tables:", "= model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins,", "t = times[i] dt = tb-ta else: ta = tstart", "in tar archive. output = '\\n'.join(table).encode('ascii') extension = \".dat\" if", "the transformation name with its class. 
flavor_transformation_dict = {'NoTransformation': NoTransformation(),", "t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table, params): #plotting the events", "= np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1]) #", "tb-ta else: ta = tstart tb = tend t =", "s = '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s,", "t0 = tstart t1 = tend nbin = 1 times", "starting_index = np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for i", "= model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1,", "These event rates are given as a function of the", "requested interval for j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra =", "column levels = list(table.columns.names) levels.remove('channel') t = table.stack(levels) for name,pattern", "None Start of time interval to integrate over, or list", "'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file =", "/ (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:]", "bins. Returns ------- str Path of compressed .tar file with", "dtype=np.int64) for i in range(len(tstart)): starting_index[i] = next(j for j,", "\"\"\" model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. 
Use", "list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result = {}", "smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction", "integrated in a given time window (or time bin), or", "= {} for flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for", "with the cross-sections for the interaction channels happening in various", "TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files =", "each time bin and for each interaction channel. verbose :", "formats complete graph smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected'", "Events') #read the results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] +", "t def do_plot(table, params): #plotting the events from given table", "save to a separate column t_agg = t_sel.sum(axis='columns') #drop processed", "`like` t_sel = t.filter(like=pattern) #sum over them and save to", "next(j for j, t in enumerate(model_tend) if t >= tend[i])", "contains in the first column the energy bins, in the", "in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor]", "tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for s", "are None: full time window of the model if tstart", "MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s =", "= model_times*1.0 model_tstart[0] = model_times[0] for i in range(1, len(model_times),", "+ transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2' with", "table flux,det,weighted,smeared = params for c in table.columns: if table[c].max()", "0.5*(tedges[1:] + tedges[:-1]) # Generate output. 
if output_filename is not", "'\\n'.join(table).encode('ascii') extension = \".dat\" if output_filename is not None: if", "latter will compute the integrated neutrino flux (fluence) in the", "the number of events for each interaction channel in the", "output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence files in SNOwGLoBES format. This", "= tend nbin = 1 times = 0.5*(tstart + tend)", "are two ways to do this, either generate a time", "will be based on input file name. tstart : astropy.Quantity", "build the detected neutrino energy spectrum and neutrino time distribution,", "+ '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) +", "None: tfname = output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) #", "and time, for each interaction channel. Parameters ---------- SNOwGLoBESdir :", "is done taking as input the supernova simulation model. The", "snmodel.get_time()[-1] try: if len(tstart/u.s) > 0: t0 = tstart[0] t1", "osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor] /= (tb-ta)", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) #", "or None Start of time interval to integrate over, or", "tqdm import snewpy.models from snewpy.flavor_transformation import * from snewpy.neutrino import", "files in SNOwGLoBES format. This version will subsample the times", "or None End of time interval to integrate over, or", "result = {} #Extracts data from tarfile and sets up", "= table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot", "time window (or time bin), or in a snapshot in", "= len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type,", "MeV # Loop over sampled times. 
for i in range(nbin):", "snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger =", "flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time", "series files in SNOwGLoBES format. This version will subsample the", "os.path.splitext(model_file) # strip extension (if present) filename = model_file_root +", "in requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for", "results if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png'", "'.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2'", "units as u from tqdm.auto import tqdm import snewpy.models from", "params for c in table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c),", "is similar to what produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input)", "Format of input file. Matches the name of the corresponding", "in enumerate(model_tend) if t > tstart)] ending_index = [next(j for", "MeV = 1.60218e-6 * u.erg energy = np.linspace(0, 100, 501)", "extension (if present) tfname = model_file_root + '.' + transformation_type", "flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res", "supports many different neutrino detectors, detector materials and interaction channels.", "plots or returns a data table. Parameters ---------- SNOwGLoBESdir :", "#drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column #return", "\"\"\"The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES. 
`SNOwGLoBES", "np.load(cache_file, allow_pickle=True).tolist() #This output is similar to what produced by:", "either generate a time series or a fluence file. This", "first column the energy bins, in the remaining columns the", "None: full time window of the model if tstart is", "channel or the sum of them. \"\"\" import io import", "each table contains in the first column the energy bins,", "transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir,", "generates plots or returns a data table. Parameters ---------- SNOwGLoBESdir", "import snewpy.models from snewpy.flavor_transformation import * from snewpy.neutrino import Flavor,", "the corresponding class in :py:mod:`snewpy.models`. transformation_type : str Name of", "with its class. flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO':", "bool Whether to generate verbose output, e.g. for debugging. \"\"\"", "model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the timings", "requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt <", "and interaction channels. There are three basic steps to using", "puts together all the interaction channels and time bins evaluated", "tend : astropy.Quantity or None End of time interval to", "def gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\\\'+flv if bar:", "+ '.npy' logging.info(f'Reading tables from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist()", "and for each time bin). 
The output tables allow to", "in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins of model", "flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor", "tarball_path : str Path of compressed .tar file produced e.g.", "enumerate(model_tend) if t >= tend[i]) else: starting_index = [next(j for", "= [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for", "+ '.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result) return", "evaluate the neutrino flux at each time step, the latter", "for each reaction channel or the sum of them. \"\"\"", "in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {}", "= model_times[0] for i in range(1, len(model_times), 1): model_tstart[i] =", "``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of detector. If", "as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat'))", "the supernova simulation model. The first will evaluate the neutrino", "tmin) / (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times =", "mass of the detector as well as a smearing matrix", "the output files from SNOwGLoBES, collated files, and .png's made", "nbin > 1: filename = output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension", "tend=None): \"\"\"Generate fluence files in SNOwGLoBES format. This version will", "in the detector. \"\"\" def aggregate_channels(table, **patterns): #rearrange the table", "to 30 time slices. 
tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1]", "_ = os.path.splitext(model_file) # strip extension (if present) tfname =", "t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence =", "into account the effective mass of the detector as well", "smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared':", "flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt = dt else: for", "neutrino flux information from supernova model. model_type : str Format", "{cache_file}') np.save(cache_file, result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c):", "list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det", "w in ['weighted','unweighted']: for s in ['smeared','unsmeared']: table = t[w][s]", "If ``None``, will be based on input file name. tstart", ".tar file with neutrino flux data. \"\"\" model_class = getattr(snewpy.models.ccsn,", "time series or a fluence file. This is done taking", "{'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups()", "EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per", "= tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname)", "'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups() if", "not None: tfname = output_filename + 'kpc.tar.bz2' else: model_file_root, _", "from tempfile import TemporaryDirectory import matplotlib as mpl import matplotlib.pyplot", "each interaction channel in the detector. 
\"\"\" def aggregate_channels(table, **patterns):", "= tstart[i] tb = tend[i] t = times[i] dt =", "plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile with the condensed data", "This step convolves the fluence generated in the previous step", "ways to do this, either generate a time series or", "detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result", "t[name]=t_agg #fill the column #return table with the original levels", "time interval to integrate over, or list of start times", "np from astropy import units as u from tqdm.auto import", "MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path,", "the latter will compute the integrated neutrino flux (fluence) in", "``\"all\"``, will use all detectors supported by SNOwGLoBES. skip_plots: bool", "data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot the results", "tables: One table per time bin; each table contains in", "tar archive that gives information on parameters output = '\\n'.join(map(str,", "``None``, will be based on input file name. tstart :", "dt else: for flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate", "collated files, and .png's made for this snewpy run. 
Returns", "transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir,", "is a compressed .tar file containing all individual input files.", "t1 = tend[-1] nbin = len(tstart/u.s) except: t0 = tstart", "well as a smearing matrix describing the energy-dependent detection efficiency.", "= t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results", "collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to", "happening in various detectors supported by SNOwGLoBES. It takes into", "if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)')", "= 1.60218e-6 * u.erg energy = np.linspace(0, 100, 501) *", "if bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s", "channels happening in various detectors supported by SNOwGLoBES. It takes", "energy distribution for each time bin and for each interaction", "tend)] # Generate output. if output_filename is not None: tfname", "model in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if", "or ``generate_fluence()``. detector_input : str Name of detector. If ``\"all\"``,", "import numpy as np from astropy import units as u", "files and generates plots or returns a data table. Parameters", "for each interaction channel, integrated in a given time window", "outputs:** This step puts together all the interaction channels and", "neutrino flux. 
It supports many different neutrino detectors, detector materials", "re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'}", "j, t in enumerate(model_tend) if t >= tend)] # Generate", "output tables allow to build the detected neutrino energy spectrum", "interval to integrate over, or list of start times of", "flux,det,weighted,smeared = params for c in table.columns: if table[c].max() >", "t >= tend[i]) else: starting_index = [next(j for j, t", "io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None,", "time window of the model if tstart is None: tstart", "the model time. Default to 30 time slices. tmin =", "also given. deltat : astropy.Quantity or None Length of time", "over sampled times. for i, t in enumerate(times): osc_spectra =", "for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in", "for possible values. d : int or float Distance to", "transformation_type, d, output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence files in SNOwGLoBES", "reaction channel or the sum of them. \"\"\" import io", "if c in mapp: return mapp[c] else: return re_chan_label.sub(gen_label, c)", "a snapshot in time. * **Collating SNOwGLoBES outputs:** This step", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s)", "= os.path.splitext(model_file) # strip extension (if present) filename = model_file_root", "simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results, for backward compatibiity results", "containing all individual input files. * **Running SNOwGLoBES:** This step", "distribution, for each reaction channel or the sum of them.", ": str or None Name of output file. 
If ``None``,", "t in enumerate(model_tend) if t > tstart[i]) ending_index[i] = next(j", ".png's made for this snewpy run. Returns ------- dict Dictionary", "tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created archive: {output_path}') return", "output_filename is not None: tfname = output_filename + 'kpc.tar.bz2' else:", "file name. ntbins : int Number of time slices. Will", "else: for flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time", "NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO':", "if output_filename is not None: tfname = output_filename+'.tar.bz2' else: model_file_root,", "produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name", "of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.", "channels and time bins evaluated by SNOwGLoBES in a single", "= list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for", "bin of model in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy,", "remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files and generates plots or returns", "in flux_files),res)) # save result to file for re-use in", "#creates file in tar archive that gives information on parameters", "in enumerate(model_tend) if t > tstart[i]) ending_index[i] = next(j for", "information from supernova model. model_type : str Format of input", "extension = \".dat\" if output_filename is not None: if nbin", "the detector as well as a smearing matrix describing the", "class in :py:mod:`snewpy.models`. 
transformation_type : str Name of flavor transformation.", "'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy", "= model_times*1.0 model_tend = model_times*1.0 model_tstart[0] = model_times[0] for i", "False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make", "to integrate over, or list of end times of the", ">= tend)] # Generate output. if output_filename is not None:", "in a single file (for each detector and for each", "#optionally plot the results if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s))", "'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation =", "output = '\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root, _ = os.path.splitext(model_file)", "interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates", "time slices. Returns ------- str Path of compressed .tar file", "Matches the name of the corresponding class in :py:mod:`snewpy.models`. transformation_type", "or None Name of output file. If ``None``, will be", "detection efficiency. The output gives the number of events detected", "by SNOwGLoBES. skip_plots: bool If False, it gives as output", "= 'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title}", "{} for flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w", "'w:bz2') as tf: #creates file in tar archive that gives", "SNOwGLoBES is installed. tarball_path : str Path of compressed .tar", "bin), or in a snapshot in time. 
* **Collating SNOwGLoBES", "header = 'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T index =", "import io import logging import os import re import tarfile", "for c in table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1)", "flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins of", "tarball_path,detector_input) #dict for old-style results, for backward compatibiity results =", "to associate the transformation name with its class. flavor_transformation_dict =", "file with neutrino flux data. \"\"\" model_class = getattr(snewpy.models.ccsn, model_type)", "return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None,", "times = 0.5*(tedges[1:] + tedges[:-1]) # Generate output. if output_filename", "= tend t = times dt = tb-ta #first time", ": astropy.Quantity or None End of time interval to integrate", "each interaction channel. Parameters ---------- SNOwGLoBESdir : str Path to", "filename = tempdir/f'Collated_{filename_base}.dat' #save results to text files with open(filename,'w')", "1 times = 0.5*(tstart + tend) model_times = snmodel.get_time() model_tstart", "table.append(s) logging.debug(s) # Encode energy/flux table and output to file", "sum of them. \"\"\" import io import logging import os", "if t >= tend)] # Generate output. if output_filename is", "= 0.5*(tedges[1:] + tedges[:-1]) # Generate output. if output_filename is", "results, for backward compatibiity results = {} #save collated files:", "i in range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] =", "in tar archive. 
output = '\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root,", "= tstart tb = tend t = times dt =", "def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def", "osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt", "deltat=None): \"\"\"Generate time series files in SNOwGLoBES format. This version", "model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if", "'.npy' logging.info(f'Reading tables from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This", "'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}", "the cross-sections for the interaction channels happening in various detectors", "together all the interaction channels and time bins evaluated by", "This version will subsample the times in a supernova model,", "of model in requested interval for j in range(starting_index[i]+1, ending_index[i],", "detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files,", "tstart : astropy.Quantity or None Start of time interval to", "temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor]", "times = 0.5*(tstart + tend) model_times = snmodel.get_time() model_tstart =", "None: dt = deltat ntbins = int((tmax-tmin)/dt) else: dt =", "fluence file. 
This is done taking as input the supernova", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s,", "is installed. tarball_path : str Path of compressed .tar file", "get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m):", "#This output is similar to what produced by: #tables =", "Path from tempfile import TemporaryDirectory import matplotlib as mpl import", "channel. Parameters ---------- SNOwGLoBESdir : str Path to directory where", "{} #save collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir =", "energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last", "if dt < model_tend[starting_index[i]]-ta: dt = dt else: for flavor", "str Path of compressed .tar file produced e.g. by ``generate_time_series()``", "list of start times of the time series bins. tend", "= t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table,", "time bin. The result is a compressed .tar file containing", ": astropy.Quantity or None Start of time interval to integrate", "+ 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file", "list(table.columns.names) levels.remove('channel') t = table.stack(levels) for name,pattern in patterns.items(): #get", "axis=1) return t def do_plot(table, params): #plotting the events from", "= int((tmax-tmin)/dt) else: dt = (tmax - tmin) / (ntbins+1)", "tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as input the neutrino flux files", "its class. 
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED),", "fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path)", "model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate time series files", "bins of model in requested interval for j in range(starting_index[i]+1,", "coding: utf-8 -*- \"\"\"The ``snewpy.snowglobes`` module contains functions for interacting", "['smeared','unsmeared']: table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat'", "Choose flavor transformation. Use dict to associate the transformation name", "dt < model_tend[starting_index[i]]-ta: dt = dt else: for flavor in", "given table flux,det,weighted,smeared = params for c in table.columns: if", "using SNOwGLoBES from SNEWPY: * **Generating input files for SNOwGLoBES:**", "dt = dt else: for flavor in Flavor: osc_spectra[flavor] *=", "import Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__)", "501) * MeV # Loop over sampled times. 
for i", "t = t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table, params): #plotting", "smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()}", "= '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s =", "+ '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d,", "model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin,", "filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s,", "as u from tqdm.auto import tqdm import snewpy.models from snewpy.flavor_transformation", "model_type : str Format of input file. Matches the name", "for j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy,", "desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for", "tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile with the condensed", "the neutrino flux files and configures and runs the supernova", "original levels order t = t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1)", "window (or time bin), or in a snapshot in time.", "as np from astropy import units as u from tqdm.auto", "snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] +=", "will compute the integrated neutrino flux (fluence) in the time", "levels = list(table.columns.names) levels.remove('channel') t = table.stack(levels) for name,pattern in", "is None: tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if", "plt.ylabel('Interaction Events') #read 
the results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')]", "snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type,", "mpl import matplotlib.pyplot as plt import numpy as np from", "in the first column the energy bins, in the remaining", "= {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res =", "u from tqdm.auto import tqdm import snewpy.models from snewpy.flavor_transformation import", "output_filename+extension else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if", "return s if c in mapp: return mapp[c] else: return", "the results if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename =", "'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL),", "the results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading", "compressed .tar file containing all individual input files. * **Running", "re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation", "output files and generates plots or returns a data table.", "None Name of output file. If ``None``, will be based", "for later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as", "Loop over sampled times. 
for i, t in enumerate(times): osc_spectra", "have only channel column levels = list(table.columns.names) levels.remove('channel') t =", "re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta", "for det in tables: results[det] = {} for flux,t in", "tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if", "#return table with the original levels order t = t.unstack(levels)", "the remaining columns the number of events for each interaction", "for i in range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1]", "from astropy import units as u from tqdm.auto import tqdm", "tarfile from pathlib import Path from tempfile import TemporaryDirectory import", "name,pattern in patterns.items(): #get channels which contain `like` t_sel =", "i in range(len(tstart)): starting_index[i] = next(j for j, t in", "str Name of detector. If ``\"all\"``, will use all detectors", "= 1 times = 0.5*(tstart + tend) model_times = snmodel.get_time()", ": str Input file containing neutrino flux information from supernova", "= output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension", "bin and for each interaction channel. verbose : bool Whether", "if flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$", "if t > tstart[i]) ending_index[i] = next(j for j, t", "t0 = tstart[0] t1 = tend[-1] nbin = len(tstart/u.s) except:", "try: if len(tstart/u.s) > 0: t0 = tstart[0] t1 =", "integrate over, or list of start times of the time", "tables from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This output is", "getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. 
Use dict to associate", "and neutrino time distribution, for each reaction channel or the", "t = t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return t def", "generate verbose output, e.g. for debugging. \"\"\" sng = SNOwGLoBES(SNOwGLoBESdir)", "= tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from {cache_file}') tables =", "a tarfile. Parameters ---------- model_path : str Input file containing", "energy-dependent detection efficiency. The output gives the number of events", "model_file_root, _ = os.path.splitext(model_file) # strip extension (if present) filename", "bins. tend : astropy.Quantity or None End of time interval", "verbose output, e.g. for debugging. remove_generated_files: bool Remove the output", "output gives the number of events detected as a function", "ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in", "to have only channel column levels = list(table.columns.names) levels.remove('channel') t", "= dt else: for flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta)", "contain `like` t_sel = t.filter(like=pattern) #sum over them and save", "data table. Parameters ---------- SNOwGLoBESdir : str Path to directory", "d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output))", "(model_tend[starting_index[i]]-ta) #intermediate time bins of model in requested interval for", "step with the cross-sections for the interaction channels happening in", "logging.info(f'Reading tables from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This output", "SNOwGLoBES. It takes into account the effective mass of the", "\"\"\" import io import logging import os import re import", "Generate output. 
if output_filename is not None: tfname = output_filename+'.tar.bz2'", "in enumerate(energy): for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] *", "interaction channel. Parameters ---------- SNOwGLoBESdir : str Path to directory", "#intermediate time bins of model in requested interval for j", "# strip extension (if present) tfname = model_file_root + '.'", "calculated event rates expected for a given (set of) detector(s).", "table.stack(levels) for name,pattern in patterns.items(): #get channels which contain `like`", "the output into a tarfile. Parameters ---------- model_path : str", "input files. * **Running SNOwGLoBES:** This step convolves the fluence", "output to file in tar archive. output = '\\n'.join(table).encode('ascii') extension", ": str Path of compressed .tar file produced e.g. by", "interaction channel. verbose : bool Whether to generate verbose output,", "the results for the output header = 'Energy '+' '.join(list(table.columns))", "range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1]", "energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for", "supported by SNOwGLoBES. verbose : bool Whether to generate verbose", "processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column #return table", "flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar s", "for each interaction channel. 
verbose : bool Whether to generate", "tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy = np.linspace(0,", "tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input,", "tarfile with the condensed data files and plots output_name =", "snmodel = model_class(model_path) #set the timings up #default if inputs", "t1, nbin, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output)", "slices. Will be ignored if ``deltat`` is also given. deltat", "info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path,", "starting_index[i] = next(j for j, t in enumerate(model_tend) if t", "of compressed .tar file with neutrino flux data. \"\"\" model_class", "False, it gives as output the plot of the energy", "len(tstart/u.s) > 0: t0 = tstart[0] t1 = tend[-1] nbin", "tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1,", "= flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #", "for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving", "basic steps to using SNOwGLoBES from SNEWPY: * **Generating input", "tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type, d,", "Distance to supernova in kpc. 
output_filename : str or None", "gives as output the plot of the energy distribution for", "= Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path,", "isinstance(detector_input,str): detector_input = [detector_input] result = {} #Extracts data from", "= snmodel.get_time() model_tstart = model_times*1.0 model_tend = model_times*1.0 model_tstart[0] =", "transformation. See snewpy.flavor_transformation documentation for possible values. d : int", "file name. tstart : astropy.Quantity or None Start of time", "for j, t in enumerate(model_tend) if t > tstart)] ending_index", "the number of events detected as a function of energy", "as tempdir: tempdir = Path(tempdir) for det in tables: results[det]", "The result is a compressed .tar file containing all individual", "Start of time interval to integrate over, or list of", "aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for s in ['smeared','unsmeared']: table", "There are two ways to do this, either generate a", "bin. The result is a compressed .tar file containing all", "import tarfile from pathlib import Path from tempfile import TemporaryDirectory", "which contain `like` t_sel = t.filter(like=pattern) #sum over them and", "osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s)", "= 0.5*(tstart + tend) model_times = snmodel.get_time() model_tstart = model_times*1.0", "output. 
if output_filename is not None: tfname = output_filename+'.tar.bz2' else:", "tstart is None: tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1] try:", "if output_filename is not None: tfname = output_filename + 'kpc.tar.bz2'", "tables = np.load(cache_file, allow_pickle=True).tolist() #This output is similar to what", "+= temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence", "def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output", "in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 *", "= model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin,", "NuMu NuTau aNuE aNuMu aNuTau') # Generate energy + number", "str Name of flavor transformation. See snewpy.flavor_transformation documentation for possible", "(tb-ta) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s", "f.write(table.to_string(float_format='%23.15g')) #format the results for the output header = 'Energy", "enumerate(energy): for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt", "#get channels which contain `like` t_sel = t.filter(like=pattern) #sum over", "expected by SNOwGLoBES, and compress the output into a tarfile.", "snewpy.flavor_transformation import * from snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface", "ta = tstart tb = tend t = times dt", "None: tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if len(tstart/u.s)", "Dictionary of data tables: One table per time bin; each", "flavor in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence = {} table", "the output header = 'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T", "are given as a function of the neutrino energy and", "SNOwGLoBES. 
verbose : bool Whether to generate verbose output, e.g.", "t in enumerate(model_tend) if t > tstart)] ending_index = [next(j", "lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0)", "bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c", "ntbins=30, deltat=None): \"\"\"Generate time series files in SNOwGLoBES format. This", "steps to using SNOwGLoBES from SNEWPY: * **Generating input files", "interaction channels happening in various detectors supported by SNOwGLoBES. It", "#format the results for the output header = 'Energy '+'", "else: starting_index = [next(j for j, t in enumerate(model_tend) if", "list of end times of the time series bins. Returns", "'.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2'", "\"\"\" def aggregate_channels(table, **patterns): #rearrange the table to have only", "file containing all individual input files. * **Running SNOwGLoBES:** This", "= t.filter(like=pattern) #sum over them and save to a separate", "to generate verbose output, e.g. for debugging. remove_generated_files: bool Remove", "tqdm.auto import tqdm import snewpy.models from snewpy.flavor_transformation import * from", "table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth", "result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp =", "archive. 
output = '\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root, _ =", "all the interaction channels and time bins evaluated by SNOwGLoBES", "flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path)", "* **Generating input files for SNOwGLoBES:** There are two ways", "from tqdm.auto import tqdm import snewpy.models from snewpy.flavor_transformation import *", "interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta:", "model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample the", "in range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for", "if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det in", ": bool Whether to generate verbose output, e.g. for debugging.", "each reaction channel or the sum of them. \"\"\" import", "model_tend[starting_index[i]]-ta: dt = dt else: for flavor in Flavor: osc_spectra[flavor]", "dt = (tmax - tmin) / (ntbins+1) tedges = np.arange(tmin/u.s,", "model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin > 1: starting_index =", "interaction channel in the detector. \"\"\" def aggregate_channels(table, **patterns): #rearrange", "a given (set of) detector(s). These event rates are given", "s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c in", "compressed .tar file with neutrino flux data. \"\"\" model_class =", "allow to build the detected neutrino energy spectrum and neutrino", "for each time bin). 
The output tables allow to build", "+ \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info = tarfile.TarInfo(name=filename)", "which outputs calculated event rates expected for a given (set", "tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files and", "(GeV)') plt.ylabel('Interaction Events') #read the results from storage cache_file =", "data tables: One table per time bin; each table contains", "plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)')", "files and plots output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path", "from snewpy.flavor_transformation import * from snewpy.neutrino import Flavor, MassHierarchy from", "'.join(list(table.columns)) data = table.to_numpy().T index = table.index.to_numpy() data = np.concatenate([[index],data])", "This is done taking as input the supernova simulation model.", "as mpl import matplotlib.pyplot as plt import numpy as np", "time bin of model in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]],", "TemporaryDirectory import matplotlib as mpl import matplotlib.pyplot as plt import", "#sum over them and save to a separate column t_agg", "for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt *", "tempdir = Path(tempdir) for det in tables: results[det] = {}", "energy tables expected by SNOwGLoBES, and compress the output into", "- tmin) / (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times", "tempdir: tempdir = Path(tempdir) for det in tables: results[det] =", "tstart)] ending_index = [next(j for j, t in enumerate(model_tend) if", "with the original levels order t = t.unstack(levels) t =", "model. model_type : str Format of input file. 
Matches the", "'.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2')", "class. flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO':", "{cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This output is similar to", "bin of model in requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]],", "MeV # 1MeV # Loop over sampled times. for i,", "tedges[:-1]) # Generate output. if output_filename is not None: tfname", "timebin in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu", "nbin > 1: ta = tstart[i] tb = tend[i] t", "to text files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the", "time. * **Collating SNOwGLoBES outputs:** This step puts together all", "* MeV # 1MeV # Loop over sampled times. for", "from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This output is similar", "tstart[0] t1 = tend[-1] nbin = len(tstart/u.s) except: t0 =", "first will evaluate the neutrino flux at each time step,", "the interaction channels and time bins evaluated by SNOwGLoBES in", "-*- \"\"\"The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES.", ": astropy.Quantity or None Length of time slices. 
Returns -------", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])", "logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None): \"\"\"Generate", "tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1]) # Generate output.", "extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return", "times of the time series bins. Returns ------- str Path", "osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table and output to", "Encode energy/flux table and output to file in tar archive.", "detector and for each time bin). The output tables allow", "model, produce energy tables expected by SNOwGLoBES, and compress the", "efficiency. The output gives the number of events detected as", "t in enumerate(model_tend) if t >= tend)] # Generate output.", "from SNEWPY: * **Generating input files for SNOwGLoBES:** There are", "tempfile import TemporaryDirectory import matplotlib as mpl import matplotlib.pyplot as", "end times of the time series bins. 
Returns ------- str", "the events from given table flux,det,weighted,smeared = params for c", "can estimate detected event rates from a given input supernova", "#read the results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'", "SNOwGLoBES, collated files, and .png's made for this snewpy run.", "+ transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2' with", "model_path : str Input file containing neutrino flux information from", "= [next(j for j, t in enumerate(model_tend) if t >=", "flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL),", "a single file (for each detector and for each time", "* u.erg energy = np.linspace(0, 100, 501) * MeV #", "filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results to text", "in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res)) #", "*= (model_tend[starting_index[i]]-ta) #intermediate time bins of model in requested interval", "model in requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation)", "script inside SNOwGLoBES, which outputs calculated event rates expected for", "Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\")", "results[filename.name] = {'header':header,'data':data} #optionally plot the results if skip_plots is", "each time bin). The output tables allow to build the", "str Format of input file. 
Matches the name of the", "model_tstart[0] = model_times[0] for i in range(1, len(model_times), 1): model_tstart[i]", "verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files and generates plots or", "= model_times[-1] if nbin > 1: starting_index = np.zeros(len(times), dtype=np.int64)", "= \".dat\" if output_filename is not None: if nbin >", "tmax, ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as", "output the plot of the energy distribution for each time", "bins evaluated by SNOwGLoBES in a single file (for each", "plot the results if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename", "It supports many different neutrino detectors, detector materials and interaction", "SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates from a", "and compress the output into a tarfile. Parameters ---------- model_path", "results = {} #save collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir:", "will be based on input file name. ntbins : int", "condensed data files and plots output_name = Path(tarball_path).stem output_name =", "for a given (set of) detector(s). These event rates are", "output is similar to what produced by: #tables = simulate(SNOwGLoBESdir,", "s in ['smeared','unsmeared']: table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename", "for j, E in enumerate(energy): for flavor in Flavor: osc_fluence[flavor]", "energy bins, in the remaining columns the number of events", "save result to file for re-use in collate() cache_file =", "Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__) def", "Length of time slices. 
Returns ------- str Path of compressed", "= tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files, det)", "snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j])", "detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res)) # save", "dpi=300, bbox_inches='tight') #Make a tarfile with the condensed data files", "lists of paths and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes')", "expected for a given (set of) detector(s). These event rates", "in patterns.items(): #get channels which contain `like` t_sel = t.filter(like=pattern)", "table to have only channel column levels = list(table.columns.names) levels.remove('channel')", "matplotlib.pyplot as plt import numpy as np from astropy import", "j, t in enumerate(model_tend) if t > tstart[i]) ending_index[i] =", "enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {} table", "osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model in requested", "output file. If ``None``, will be based on input file", "nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:", "next(j for j, t in enumerate(model_tend) if t > tstart[i])", "for j, t in enumerate(model_tend) if t > tstart[i]) ending_index[i]", "int((tmax-tmin)/dt) else: dt = (tmax - tmin) / (ntbins+1) tedges", "{smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino", "of output file. If ``None``, will be based on input", "detector_input : str Name of detector. 
If ``\"all\"``, will use", "as a function of the neutrino energy and time, for", "the previous step with the cross-sections for the interaction channels", "import * from snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface import", "estimate detected event rates from a given input supernova neutrino", "ta = tstart[i] tb = tend[i] t = times[i] dt", "j, E in enumerate(energy): for flavor in Flavor: osc_fluence[flavor] =", "flv='\\\\'+flv if bar: bar='\\\\'+bar s = f'${bar}{{\\\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return", "= {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence", "deltat : astropy.Quantity or None Length of time slices. Returns", "> 0: t0 = tstart[0] t1 = tend[-1] nbin =", "'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\\\'+flv if", "of energy for each interaction channel, integrated in a given", "time series bins. tend : astropy.Quantity or None End of", "nbin > 1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times),", "to what produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for", "= Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, \"w:gz\") as tar: for file in", "the column #return table with the original levels order t", "generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None): \"\"\"Generate fluence files", "later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar:", "files from SNOwGLoBES, collated files, and .png's made for this", "bool If False, it gives as output the plot of", "tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes as input the", "= np.linspace(0, 100, 501) * MeV # 1MeV # Loop", "**Collating SNOwGLoBES outputs:** This step puts together all the interaction", "if 
tstart is None: tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1]", "d : int or float Distance to supernova in kpc.", "+ '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname),", "verbose : bool Whether to generate verbose output, e.g. for", "do this, either generate a time series or a fluence", "------- dict Dictionary of data tables: One table per time", "= times[i] dt = tb-ta else: ta = tstart tb", "= {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO':", "1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in Flavor:", "plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats", "interval to integrate over, or list of end times of", "= next(j for j, t in enumerate(model_tend) if t >", "levels.remove('channel') t = table.stack(levels) for name,pattern in patterns.items(): #get channels", "or list of end times of the time series bins.", "osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s", "for i in range(nbin): if nbin > 1: ta =", "model_times[0] for i in range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1])", "file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.", "return t def do_plot(table, params): #plotting the events from given", "import matplotlib.pyplot as plt import numpy as np from astropy", "events from given table flux,det,weighted,smeared = params for c in", "output, e.g. for debugging. 
\"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input", "from tarfile and sets up lists of paths and fluxfilenames", "table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau') # Generate", "<gh_stars>0 # -*- coding: utf-8 -*- \"\"\"The ``snewpy.snowglobes`` module contains", "else: dt = (tmax - tmin) / (ntbins+1) tedges =", "= tstart t1 = tend nbin = 1 times =", "with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir) for det in", "simulation model. The first will evaluate the neutrino flux at", "of flavor transformation. See snewpy.flavor_transformation documentation for possible values. d", "where SNOwGLoBES is installed. tarball_path : str Path of compressed", "tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax,", "t > tstart)] ending_index = [next(j for j, t in", "Returns ------- dict Dictionary of data tables: One table per", "Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read", "tables expected by SNOwGLoBES, and compress the output into a", "runs the supernova script inside SNOwGLoBES, which outputs calculated event", "os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\", "as output the plot of the energy distribution for each", "detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input", "= snmodel.get_time()[-1] if deltat is not None: dt = deltat", "'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info = tarfile.TarInfo(name=filename) info.size =", "in range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i]", "neutrino flux at each time step, the latter will compute", "input supernova neutrino flux. 
It supports many different neutrino detectors,", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table and", "requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor", "res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res)) # save result", "AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO':", "smearing matrix describing the energy-dependent detection efficiency. The output gives", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])", "all detectors supported by SNOwGLoBES. skip_plots: bool If False, it", "produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results,", "taking as input the supernova simulation model. The first will", "/= (tb-ta) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec", "by SNOwGLoBES. verbose : bool Whether to generate verbose output,", "Name of output file. If ``None``, will be based on", "event rates from a given input supernova neutrino flux. It", "as a smearing matrix describing the energy-dependent detection efficiency. The", "snewpy run. Returns ------- dict Dictionary of data tables: One", "in the previous step with the cross-sections for the interaction", "based on input file name. ntbins : int Number of", "if ``deltat`` is also given. deltat : astropy.Quantity or None", "debugging. 
remove_generated_files: bool Remove the output files from SNOwGLoBES, collated", "to using SNOwGLoBES from SNEWPY: * **Generating input files for", "as input the neutrino flux files and configures and runs", "in range(len(tstart)): starting_index[i] = next(j for j, t in enumerate(model_tend)", "are three basic steps to using SNOwGLoBES from SNEWPY: *", "with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files", "in kpc. output_filename : str or None Name of output", "logging.debug(s) # Encode energy/flux table and output to file in", "[next(j for j, t in enumerate(model_tend) if t > tstart)]", "detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files and generates", "will use all detectors supported by SNOwGLoBES. skip_plots: bool If", "neutrino energy spectrum and neutrino time distribution, for each reaction", "**patterns): #rearrange the table to have only channel column levels", "times. for i in range(nbin): if nbin > 1: ta", "distribution for each time bin and for each interaction channel.", "pathlib import Path from tempfile import TemporaryDirectory import matplotlib as", "time series files in SNOwGLoBES format. This version will subsample", "with neutrino flux data. \"\"\" model_class = getattr(snewpy.models.ccsn, model_type) #", "will use all detectors supported by SNOwGLoBES. 
verbose : bool", "detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result = {} #Extracts", "0: t0 = tstart[0] t1 = tend[-1] nbin = len(tstart/u.s)", "plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete", "interval for j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j],", "remaining columns the number of events for each interaction channel", "#save collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir)", "in a supernova model, produce energy tables expected by SNOwGLoBES,", "event rates are given as a function of the neutrino", "model_type) # Choose flavor transformation. Use dict to associate the", "os.path.splitext(model_file) # strip extension (if present) tfname = model_file_root +", "of model in requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy,", "times[i] dt = tb-ta else: ta = tstart tb =", "It takes into account the effective mass of the detector", "times of the time series bins. tend : astropy.Quantity or", "present) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \\", "file in tar archive that gives information on parameters output", "generate a time series or a fluence file. This is", "> 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5,", "for old-style results, for backward compatibiity results = {} #save", "to supernova in kpc. 
output_filename : str or None Name", "tend[-1] nbin = len(tstart/u.s) except: t0 = tstart t1 =", "compute the integrated neutrino flux (fluence) in the time bin.", "from pathlib import Path from tempfile import TemporaryDirectory import matplotlib", "plots output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz')", "only channel column levels = list(table.columns.names) levels.remove('channel') t = table.stack(levels)", "neutrino flux (fluence) in the time bin. The result is", "plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read the results from storage", "* MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])", "graph smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()}", "output header = 'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T index", "of time interval to integrate over, or list of start", "generate verbose output, e.g. for debugging. remove_generated_files: bool Remove the", "skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300,", "and plots output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path =", "numpy as np from astropy import units as u from", "np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)):", "tstart tb = tend t = times dt = tb-ta", "t in enumerate(model_tend) if t >= tend[i]) else: starting_index =", "channel, integrated in a given time window (or time bin),", "the name of the corresponding class in :py:mod:`snewpy.models`. 
transformation_type :", "np.linspace(0, 100, 501) * MeV # 1MeV # Loop over", "filename = output_filename+extension else: model_file_root, _ = os.path.splitext(model_file) # strip", "a fluence file. This is done taking as input the", "returns a data table. Parameters ---------- SNOwGLoBESdir : str Path", "SNOwGLoBES, and compress the output into a tarfile. Parameters ----------", "given time window (or time bin), or in a snapshot", "osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s", "[detector_input] result = {} #Extracts data from tarfile and sets", "tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input", "Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor] /=", "t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column #return table with the", "= snmodel.get_time()[-1] try: if len(tstart/u.s) > 0: t0 = tstart[0]", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s,", "the table to have only channel column levels = list(table.columns.names)", "snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {} table = [] table.append('#", "events detected as a function of energy for each interaction", "\"\"\"Generate fluence files in SNOwGLoBES format. This version will subsample", "= table.to_numpy().T index = table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] =", "storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from {cache_file}')", "#save results to text files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g'))", "channels. 
There are three basic steps to using SNOwGLoBES from", "of events detected as a function of energy for each", "transformation. Use dict to associate the transformation name with its", "energy = np.linspace(0, 100, 501) * MeV # 1MeV #", "previous step with the cross-sections for the interaction channels happening", "filename = output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension else: model_file_root, _", "table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin", "> 1: ta = tstart[i] tb = tend[i] t =", "tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors',", "rates from a given input supernova neutrino flux. It supports", "= params for c in table.columns: if table[c].max() > 0.1:", "tarfile.open(output_path, \"w:gz\") as tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created", "= re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay',", "model time. Default to 30 time slices. tmin = snmodel.get_time()[0]", "as tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created archive: {output_path}')", "of events for each interaction channel in the detector. \"\"\"", "info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir,", "each detector and for each time bin). The output tables", "snmodel = model_class(model_path) # Subsample the model time. 
Default to", "interaction channel, integrated in a given time window (or time", "= len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path,", "in enumerate(model_tend) if t >= tend[i]) else: starting_index = [next(j", "Energy (GeV)') plt.ylabel('Interaction Events') #read the results from storage cache_file", "and for each interaction channel. verbose : bool Whether to", "if nbin > 1: filename = output_filename+\"_\"+str(i)+extension else: filename =", "Beta Decay', 'e':r'${\\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e':", "by SNOwGLoBES, and compress the output into a tarfile. Parameters", "aNuE aNuMu aNuTau') # Generate energy + number flux table.", "'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info = tarfile.TarInfo(name=filename) info.size =", "time bin). The output tables allow to build the detected", "mapp: return mapp[c] else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path,", "bool Remove the output files from SNOwGLoBES, collated files, and", "in the remaining columns the number of events for each", "{'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED),", "if deltat is not None: dt = deltat ntbins =", "#fill the column #return table with the original levels order", "Loop over sampled times. 
for i in range(nbin): if nbin", "model_tstart = model_times*1.0 model_tend = model_times*1.0 model_tstart[0] = model_times[0] for", "to {cache_file}') np.save(cache_file, result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\\d*)_?(.*)') def", "io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy = np.linspace(0, 100,", "old-style results, for backward compatibiity results = {} #save collated", "transformation_type : str Name of flavor transformation. See snewpy.flavor_transformation documentation", "or None Length of time slices. Returns ------- str Path", "for SNOwGLoBES:** There are two ways to do this, either", "not None: tfname = output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file)", "= list(table.columns.names) levels.remove('channel') t = table.stack(levels) for name,pattern in patterns.items():", "for j, t in enumerate(model_tend) if t >= tend[i]) else:", "Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence = {} table = []", "energy/flux table and output to file in tar archive. output", "of time slices. Will be ignored if ``deltat`` is also", "len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type,", "the time series bins. 
Returns ------- str Path of compressed", "c in mapp: return mapp[c] else: return re_chan_label.sub(gen_label, c) def", "that gives information on parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'),", "will subsample the times in a supernova model, produce energy", "c in table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10)", "If False, it gives as output the plot of the", "osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins of model in requested", "model_tend[len(model_times)-1] = model_times[-1] if nbin > 1: starting_index = np.zeros(len(times),", "np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)): starting_index[i] = next(j for", "neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE", "# strip extension (if present) filename = model_file_root + '.tbin{:01d}.'.format(i+1)", "tar archive. output = '\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root, _", "= os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the timings up #default", "ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:", "gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\\\'+flv if bar: bar='\\\\'+bar", "slices. tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat is", "i, t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence", "spectrum and neutrino time distribution, for each reaction channel or", "this timebin in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE", "neutrino detectors, detector materials and interaction channels. There are three", "input the supernova simulation model. The first will evaluate the", "supernova neutrino flux. 
It supports many different neutrino detectors, detector", "e.g. for debugging. remove_generated_files: bool Remove the output files from", "a data table. Parameters ---------- SNOwGLoBESdir : str Path to", "= snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if len(tstart/u.s) > 0:", "0.5), loc='best', borderaxespad=0) # formats complete graph smear_title = 'Interaction'", "tend t = times dt = tb-ta #first time bin", "supernova script inside SNOwGLoBES, which outputs calculated event rates expected", "files for SNOwGLoBES:** There are two ways to do this,", "the interaction channels happening in various detectors supported by SNOwGLoBES.", "model in requested interval for j in range(starting_index[i]+1, ending_index[i], 1):", "tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False):", "= tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile with the", "for flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins", "``deltat`` is also given. deltat : astropy.Quantity or None Length", ".tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. 
detector_input :", "osc_spectra[flavor] /= (tb-ta) osc_fluence = {} table = [] table.append('#", "account the effective mass of the detector as well as", "enumerate(model_tend) if t > tstart[i]) ending_index[i] = next(j for j,", "dt = deltat ntbins = int((tmax-tmin)/dt) else: dt = (tmax", "channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column #return table with", "= '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg", "c) def collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES", "Fluence at Earth for this timebin in neutrinos per cm^2'.format(t,", "gives the number of events detected as a function of", "detector. \"\"\" def aggregate_channels(table, **patterns): #rearrange the table to have", "if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5,", "effective mass of the detector as well as a smearing", "energy = np.linspace(0, 100, 501) * MeV # Loop over", "str or None Name of output file. If ``None``, will", "if output_filename is not None: if nbin > 1: filename", "results for the output header = 'Energy '+' '.join(list(table.columns)) data", "use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir)", "if nbin > 1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index =", "materials and interaction channels. There are three basic steps to", "values. d : int or float Distance to supernova in", "Default to 30 time slices. 
tmin = snmodel.get_time()[0] tmax =", "= '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s,", "for f in flux_files),res)) # save result to file for", "model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample the model", "This step puts together all the interaction channels and time", "or float Distance to supernova in kpc. output_filename : str", "evaluated by SNOwGLoBES in a single file (for each detector", "'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input]", "(fluence) in the time bin. The result is a compressed", "detector materials and interaction channels. There are three basic steps", "if len(tstart/u.s) > 0: t0 = tstart[0] t1 = tend[-1]", "None Length of time slices. Returns ------- str Path of", "temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor in Flavor:", "plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best',", "per time bin; each table contains in the first column", "the integrated neutrino flux (fluence) in the time bin. The", "model_class(model_path) #set the timings up #default if inputs are None:", "else: ta = tstart tb = tend t = times", "{det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events')", "collate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", skip_plots=False, verbose=False, remove_generated_files=True): \"\"\"Collates SNOwGLoBES output files", "for each interaction channel. 
Parameters ---------- SNOwGLoBESdir : str Path", "'\\n'.join(table).encode('ascii') extension = \".dat\" model_file_root, _ = os.path.splitext(model_file) filename =", "by SNOwGLoBES. It takes into account the effective mass of", "outputs calculated event rates expected for a given (set of)", "tables: results[det] = {} for flux,t in tables[det].items(): t =", "= '\\n'.join(table).encode('ascii') extension = \".dat\" if output_filename is not None:", "a function of energy for each interaction channel, integrated in", "None: if nbin > 1: filename = output_filename+\"_\"+str(i)+extension else: filename", "(if present) tfname = model_file_root + '.' + transformation_type +", "snmodel.get_time() model_tstart = model_times*1.0 model_tend = model_times*1.0 model_tstart[0] = model_times[0]", "+ \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info = tarfile.TarInfo(name=filename)", "file containing neutrino flux information from supernova model. model_type :", "name of the corresponding class in :py:mod:`snewpy.models`. transformation_type : str", "= t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the", "files, and .png's made for this snewpy run. Returns -------", "snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat is not None: dt", "(4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])", "dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau') #", "* **Running SNOwGLoBES:** This step convolves the fluence generated in", "axis='columns',inplace=True) t[name]=t_agg #fill the column #return table with the original", "#last time bin of model in requested interval temp_spectra =", "flux information from supernova model. model_type : str Format of", "many different neutrino detectors, detector materials and interaction channels. There", "various detectors supported by SNOwGLoBES. 
It takes into account the", "model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin > 1:", "sampled times. for i in range(nbin): if nbin > 1:", "Whether to generate verbose output, e.g. for debugging. remove_generated_files: bool", "+ transformation_type + \\ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info", "= next(j for j, t in enumerate(model_tend) if t >=", "io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input=\"all\", verbose=False): \"\"\"Takes", "See snewpy.flavor_transformation documentation for possible values. d : int or", "1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for", "transformation name with its class. flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO':", "Generate output. if output_filename is not None: tfname = output_filename", "'{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode", "paths and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as tempdir:", "input file name. tstart : astropy.Quantity or None Start of", "tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file in tar archive", "be based on input file name. tstart : astropy.Quantity or", "results to text files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format", "allow_pickle=True).tolist() #This output is similar to what produced by: #tables", ": str Name of flavor transformation. 
See snewpy.flavor_transformation documentation for", "borderaxespad=0) # formats complete graph smear_title = 'Interaction' if smeared=='unsmeared'", "and sets up lists of paths and fluxfilenames for later", "for the output header = 'Energy '+' '.join(list(table.columns)) data =", "strip extension (if present) filename = model_file_root + '.tbin{:01d}.'.format(i+1) +", "s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s =", "open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the results for the output", "AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence':", "time distribution, for each reaction channel or the sum of", "is not None: if nbin > 1: filename = output_filename+\"_\"+str(i)+extension", "be based on input file name. ntbins : int Number", "plot of the energy distribution for each time bin and", "def aggregate_channels(table, **patterns): #rearrange the table to have only channel", "gives information on parameters output = '\\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))", "time bins of model in requested interval for j in", "energy spectrum and neutrino time distribution, for each reaction channel", "number flux table. for j, E in enumerate(energy): for flavor", "model_file_root, _ = os.path.splitext(model_file) # strip extension (if present) tfname", "function of energy for each interaction channel, integrated in a", "to directory where SNOwGLoBES is installed. 
tarball_path : str Path", "detector_input = [detector_input] result = {} #Extracts data from tarfile", "full time window of the model if tstart is None:", "tfname = output_filename + 'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file)", "> 1: filename = output_filename+\"_\"+str(i)+extension else: filename = output_filename+extension else:", "tf: #creates file in tar archive that gives information on", "flux_files),res)) # save result to file for re-use in collate()", "tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat is not", "---------- SNOwGLoBESdir : str Path to directory where SNOwGLoBES is", "in tables: results[det] = {} for flux,t in tables[det].items(): t", "import Path from tempfile import TemporaryDirectory import matplotlib as mpl", "> 1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64)", "* **Collating SNOwGLoBES outputs:** This step puts together all the", "the neutrino flux at each time step, the latter will", "as well as a smearing matrix describing the energy-dependent detection", "debugging. \"\"\" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input", "of the time series bins. tend : astropy.Quantity or None", "'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence()," ]
[ "torch.no_grad(): next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1) n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step", "nn.Module, next_z: torch.Tensor, target_z: torch.Tensor, offset: torch.Tensor, ) -> torch.Tensor:", "dist_projection( self, network: nn.Module, next_z: torch.Tensor, target_z: torch.Tensor, offset: torch.Tensor,", "0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1) )", "(distance.detach() < 0).float() ).abs() * self.huber_loss(distance) element_wise_loss = torch.mean(quantile_huber_loss, dim=1,", "...], data: Tuple[torch.Tensor, ...] ) -> Tuple[torch.Tensor, ...]: network, target_network", "__init__(self, hyper_params: DictConfig, use_cuda: bool): Loss.__init__(self, hyper_params, use_cuda) def __call__(", "def huber_loss(x: List[torch.Tensor], k: float = 1.0): return torch.where(x.abs() <=", "proj_dist.view(-1).index_add_( 0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)", "b.floor().long() ub = b.ceil().long() proj_dist = torch.zeros(next_z.size()) if self.use_cuda: proj_dist", "= self.hyper_params.gamma ** self.hyper_params.n_step target_z = rewards + (1 -", "next_z, target_z, offset) log_dist = torch.log(z_dists) element_wise_loss = -(target_proj *", "next_states, dones = data z_dists = network.forward(states) z_dists = z_dists[list(range(states.size(0))),", "(1 - dones) * n_step_gamma * network.support target_z = torch.clamp(target_z,", "network.num_atoms, batch_size) .long() .unsqueeze(1) .expand(batch_size, network.num_atoms) ) if self.use_cuda: offset", "next_actions] n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step target_z = rewards +", "b = (target_z - network.v_min) / network.delta_z lb = b.floor().long()", "rewards + (1 - dones) * n_step_gamma * next_q element_wise_loss", "self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...], ) -> Tuple[torch.Tensor,", "self.huber_loss(distance) element_wise_loss = 
torch.mean(quantile_huber_loss, dim=1, keepdim=True) return element_wise_loss @staticmethod def", "networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...] ) -> Tuple[torch.Tensor, ...]:", "return element_wise_loss def dist_projection( self, network: nn.Module, next_z: torch.Tensor, target_z:", "log_dist).sum(1) return element_wise_loss def dist_projection( self, network: nn.Module, next_z: torch.Tensor,", "Tuple[torch.Tensor, ...] ) -> Tuple[torch.Tensor, ...]: network, target_network = networks", "dim=1, keepdim=True) return element_wise_loss @staticmethod def huber_loss(x: List[torch.Tensor], k: float", "next_z = target_network.forward(next_states) next_actions = torch.max(next_z.mean(2), dim=1)[1] next_z = next_z[list(range(states.size(0))),", "(next_z * (ub.float() - b)).view(-1) ) proj_dist.view(-1).index_add_( 0, (ub +", "quantile_huber_loss = ( network.tau - (distance.detach() < 0).float() ).abs() *", "( torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size) .long() .unsqueeze(1)", "= next_z[list(range(states.size(0))), next_actions] n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step target_z =", "hyper_params: DictConfig, use_cuda: bool): Loss.__init__(self, hyper_params, use_cuda) def __call__( self,", "double DQN loss\"\"\" def __init__(self, hyper_params: DictConfig, use_cuda: bool): Loss.__init__(self,", "<= k, 0.5 * x.pow(2), k * (x.abs() - 0.5", "next_states, dones = data q_value = network.forward(states).gather(1, actions) with torch.no_grad():", "q_value, target_q.detach(), reduction=\"none\" ) return element_wise_loss class QRLoss(Loss): \"\"\"Compute quantile", "rewards, next_states, dones = data z_dists = network.forward(states) z_dists =", "0.5 * k)) class CategoricalLoss(Loss): \"\"\"Compute C51 loss\"\"\" def __init__(self,", "-> Tuple[torch.Tensor, ...]: network, target_network = networks states, actions, rewards,", "self.dist_projection(network, next_z, target_z, offset) log_dist = torch.log(z_dists) 
class DQNLoss(Loss):
    """Compute double DQN loss"""

    def __init__(self, hyper_params: DictConfig, use_cuda: bool):
        Loss.__init__(self, hyper_params, use_cuda)

    def __call__(
        self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, ...]:
        """Return the element-wise (per-transition) double-DQN loss.

        Args:
            networks: (online network, target network) pair.
            data: (states, actions, rewards, next_states, dones) batch;
                actions/rewards/dones are column tensors of shape (batch, 1)
                -- assumed from the gather/broadcast pattern, confirm at caller.

        Returns:
            Per-sample smooth-L1 loss (reduction="none"), suitable for
            prioritized-replay importance weighting.
        """
        network, target_network = networks
        states, actions, rewards, next_states, dones = data

        q_value = network.forward(states).gather(1, actions)
        with torch.no_grad():
            # Double DQN: pick the greedy next action with the *online*
            # network, evaluate it with the *target* network. The previous
            # code took max over the target network (vanilla DQN), which
            # contradicted the documented double-DQN behavior.
            next_actions = network.forward(next_states).argmax(dim=1, keepdim=True)
            next_q = target_network.forward(next_states).gather(1, next_actions)

        # n-step bootstrapped target; (1 - dones) zeroes out terminal states.
        n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
        target_q = rewards + (1 - dones) * n_step_gamma * next_q

        # Keep per-sample losses so the agent can weight/prioritize them.
        element_wise_loss = F.smooth_l1_loss(
            q_value, target_q.detach(), reduction="none"
        )
        return element_wise_loss
class QRLoss(Loss):
    """Compute quantile regression (QR-DQN) loss"""

    def __init__(self, hyper_params: DictConfig, use_cuda: bool):
        Loss.__init__(self, hyper_params, use_cuda)

    def __call__(
        self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
    ) -> Tuple[torch.Tensor, ...]:
        """Return the element-wise quantile-regression loss.

        Args:
            networks: (online network, target network) pair; the networks
                output per-action quantile values of shape
                (batch, num_actions, num_quantiles) -- assumed from the
                mean(2)/indexing pattern, confirm against the model.
            data: (states, actions, rewards, next_states, dones) batch.
        """
        network, target_network = networks
        states, actions, rewards, next_states, dones = data

        # Quantile values of the taken actions: (batch, num_quantiles).
        z_dists = network.forward(states)
        z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]

        with torch.no_grad():
            # Greedy next action w.r.t. the mean over quantiles.
            next_z = target_network.forward(next_states)
            next_actions = torch.max(next_z.mean(2), dim=1)[1]
            next_z = next_z[list(range(states.size(0))), next_actions]

        n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
        target_z = rewards + (1 - dones) * n_step_gamma * next_z

        # Asymmetric quantile weighting |tau - 1{distance < 0}| on top of the
        # Huber loss; detach() keeps the indicator out of the gradient.
        distance = target_z - z_dists
        quantile_huber_loss = (
            network.tau - (distance.detach() < 0).float()
        ).abs() * self.huber_loss(distance)
        element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)

        return element_wise_loss

    @staticmethod
    def huber_loss(x: torch.Tensor, k: float = 1.0) -> torch.Tensor:
        """Element-wise Huber loss: quadratic for |x| <= k, linear beyond.

        The previous annotation (``List[torch.Tensor]``) was wrong -- the
        function operates on a single tensor element-wise.
        """
        return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
class CategoricalLoss(Loss):
    """Compute C51 (categorical DQN) loss"""

    def __init__(self, hyper_params: DictConfig, use_cuda: bool):
        Loss.__init__(self, hyper_params, use_cuda)

    def __call__(
        self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, ...]:
        """Return the element-wise cross-entropy between the projected target
        distribution and the online network's predicted distribution.

        Args:
            networks: (online network, target network) pair; networks output
                per-action categorical distributions over ``num_atoms`` atoms.
            data: (states, actions, rewards, next_states, dones) batch.
        """
        network, target_network = networks
        states, actions, rewards, next_states, dones = data
        batch_size = states.size(0)

        # Flat per-row offsets so one index_add_ can scatter the whole batch.
        offset = (
            torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
            .long()
            .unsqueeze(1)
            .expand(batch_size, network.num_atoms)
        )
        if self.use_cuda:
            offset = offset.cuda()

        # Predicted distribution of the taken actions: (batch, num_atoms).
        z_dists = network.forward(states)
        z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]

        with torch.no_grad():
            # Greedy next action w.r.t. the distribution mean.
            next_z = target_network.forward(next_states)
            next_actions = torch.max(next_z.mean(2), dim=1)[1]
            next_z = next_z[list(range(states.size(0))), next_actions]

        n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
        # Bellman-update the fixed support, then clip into [v_min, v_max].
        target_z = rewards + (1 - dones) * n_step_gamma * network.support
        target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)

        target_proj = self.dist_projection(network, next_z, target_z, offset)

        # NOTE(review): log of raw probabilities -- -inf if any atom's
        # probability is exactly zero; consider clamping upstream.
        log_dist = torch.log(z_dists)
        element_wise_loss = -(target_proj * log_dist).sum(1)

        return element_wise_loss

    def dist_projection(
        self,
        network: nn.Module,
        next_z: torch.Tensor,
        target_z: torch.Tensor,
        offset: torch.Tensor,
    ) -> torch.Tensor:
        """Project the updated support ``target_z`` back onto the fixed atom
        grid, distributing each atom's mass ``next_z`` between its two
        neighboring atoms proportionally to distance.
        """
        b = (target_z - network.v_min) / network.delta_z
        lb = b.floor().long()
        ub = b.ceil().long()

        # Fix: when b lands exactly on an atom, lb == ub and both weights
        # (ub - b) and (b - lb) vanish, silently dropping that probability
        # mass. Shift one bound so the full mass is preserved.
        lb[(ub > 0) * (lb == ub)] -= 1
        ub[(lb < (network.num_atoms - 1)) * (lb == ub)] += 1

        proj_dist = torch.zeros(next_z.size())
        if self.use_cuda:
            proj_dist = proj_dist.cuda()

        # Scatter mass to the lower and upper neighboring atoms.
        proj_dist.view(-1).index_add_(
            0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
        )
        proj_dist.view(-1).index_add_(
            0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
        )

        return proj_dist
[ "buttons and keys root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\",", "self.refresh() def prev(self, event=None): self.curr_img = (self.curr_img - 1) %", "int(w / 600)):, :, :] = 255 c_list[index, :, :int(w", "/ len(self.imgs_pred)) + 1 imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3),", "17) b_logos = int(w / 100) self.canvas = Canvas(root, width=w,", "img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 =", "self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button", "errors, logos): self.dates = dates self.errors = errors # setup", ":, (imagelist_h - int(w / 600)):, :] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred)", "- (self.logo3.height() / 2 + b_logos)), image=self.logo3) self.canvas.create_text(w / 2,", "= Button(root, font=(\"Courier\", int(h / 50)), text = \"Show masks\",", "Image.ANTIALIAS)) for img in self.imgs_pred] self.i_left = self.canvas.create_image(w / 3.9,", "from datetime import datetime, timedelta from tkinter import Canvas, Tk,", "self.img_w_f / self.zoom / 2)) self.refresh() def toggle_mask(self, event=None): self.mask_toggle", "self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w,", "logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1]", "self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\",", "range(len(self.imgs_pred)): imagelist_a[index, :, :, :] = 
np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists", "imgs_p, imgs_o, imgs_m, dates, errors, logos): self.dates = dates self.errors", "= Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos /", "0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom /", "text='Predicted') self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=(\"Courier\",", "2 - self.img_w_f / zoom / 2) + self.shift_y self.end_y", "media class MainWindow(): def next(self, event=None): self.curr_img = (self.curr_img +", "and logos h_logos = int(h / 17) b_logos = int(w", "self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2", "resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1],", "images in {} than days in the report {}!'.format(args.masks, args.report))", "+ 1 imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8') for", "int(self.start_y + self.img_w_f / zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1", "# image timeline imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1", "= (self.curr_img + 1) % len(self.imgs_orig) self.refresh() def prev(self, event=None):", "in range(len(self.imgs_pred)): c_list = np.array(imagelist_a) c_list[index, :int(w / 600), :,", "else 'n.a. 
' for error in report[:, 5]] logos =", "= \"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit = Button(root, font=(\"Courier\", int(h", "% len(self.imgs_orig) self.refresh() def prev(self, event=None): self.curr_img = (self.curr_img -", "Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL import numpy as np", "width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig]", "int(h / 50)), text = \"Show masks\", command=self.toggle_mask) self.b_reset =", "= self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img]", "self.zoom -= 20 if self.zoom == 100: self.reset_transform() self.refresh() def", "self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w),", "index in range(len(self.imgs_pred)): imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h),", "/ 35)), text='Predicted') self.day_info = self.canvas.create_text(w / 2, h *", "width of each displayed image self.imgs_orig_m = [] # masked", "imagelist_h, imagelist_h, 3), dtype='uint8') for index in range(len(self.imgs_pred)): imagelist_a[index, :,", "resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index]))", "= 255 c_list[index, (imagelist_h - int(w / 600)):, :, :]", "img in os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates =", "self.imgs_pred_m = [] self.imgs_orig = [] # unmasked full images", "h * 0.13, font=(\"Courier\", int(h / 30)), text='') 
self.zoom =", "20 elif event.delta == 240: self.zoom += 40 elif event.delta", "view\", command=self.reset_transform, state=DISABLED) self.b_quit = Button(root, font=(\"Courier\", int(h / 50)),", "filling results\"\"\" import os import argparse from datetime import datetime,", "int(h / 35)), text='Observed') self.canvas.create_text(w - w / 3.9, h", "* 0.94, window=self.b_quit) # bind buttons and keys root.bind(\"q\", lambda", "(imagelist_h - int(w / 600)):, :] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) *", "h, imgs_p, imgs_o, imgs_m, dates, errors, logos): self.dates = dates", "ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image", "+= 40 elif event.delta == 360: self.zoom += 60 else:", "images self.imgs_pred = [] self.cc = [] for index, img", "/ 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w -", "!= len(dates): raise RuntimeError('Different number of images in {} than", "h_logos = int(h / 17) b_logos = int(w / 100)", "\"\"\"viewer application which allows to interactively view spatio-temporal gap filling", "/ 2 - self.img_w_f / self.zoom / 2)), int(self.img_w_f /", "text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self, event): if event.num == 4", "application which allows to interactively view spatio-temporal gap filling results\"\"\"", "Button, RAISED, DISABLED, SUNKEN, NORMAL import numpy as np from", "right hand images') parser.add_argument('-y', '--year', type=int, default=2018, help='year of data", "self.img_w_f / self.zoom / 2)) self.shift_y = min(max(self.start_drag[1] - event.y,", "/ zoom / 2) + self.shift_y self.end_y = int(self.start_y +", "zoom / 2) + self.shift_y self.end_y = int(self.start_y + self.img_w_f", "[] for index in range(len(self.imgs_pred)): c_list = 
np.array(imagelist_a) c_list[index, :int(w", "= [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred] self.i_left =", "/ self.zoom / 2)) self.refresh() def toggle_mask(self, event=None): self.mask_toggle =", "self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] #", "command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks) self.canvas.create_window(w", "click_right(self, event): if not self.over_button: self.next() def refresh(self): zoom =", "event=None): self.mask_toggle = not self.mask_toggle self.refresh() def reset_transform(self, event=None): self.mask_toggle", "self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3)))) self.i_list = self.canvas.create_image(w * 0.5,", "self.curr_img = (self.curr_img - 1) % len(self.imgs_orig) self.refresh() def click_wheel(self,", "self.canvas.create_text(w - w / 3.9, h * 0.19, font=(\"Courier\", int(h", "b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3) self.canvas.create_text(w", "self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory with images", "allows to interactively view spatio-temporal gap filling results\"\"\" import os", "600)):, :] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3)))) self.i_list", "self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\",", "self.over_button: self.prev() def click_right(self, event): if not self.over_button: self.next() def", "[Image.open(img) for img in 
sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]", "= 100 self.shift_x = 0 self.shift_y = 0 self.refresh() def", "logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1]", "[media.logo1, media.logo2, media.logo3] if len(imgs_o) != len(dates): raise RuntimeError('Different number", "def refresh(self): zoom = float(self.zoom) / 100 self.start_x = int(self.img_w_f", "b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width()", "* 0.94, font=(\"Courier\", int(h / 50)), text='') # image timeline", "if self.zoom == 100: self.reset_transform() self.refresh() def drag_roi(self, event): self.shift_x", "self.curr_img = (self.curr_img + 1) % len(self.imgs_orig) self.refresh() def prev(self,", "0.50, h * 0.94, window=self.b_reset) self.canvas.create_window(w * 0.70, h *", "- 1)).strftime('%b %d %Y') for day in range(report.shape[0])] errors =", "2) + self.shift_y self.end_y = int(self.start_y + self.img_w_f / zoom)", "- int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),", "drag_roi(self, event): self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f", "self.mask_toggle self.refresh() def reset_transform(self, event=None): self.mask_toggle = False self.zoom =", "# unmasked full images self.imgs_pred = [] self.cc = []", "__init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos):", "2 - self.img_w_f / zoom / 2) + self.shift_x self.end_x", "self.curr_img = 0 # text labels and logos h_logos =", "datetime import datetime, timedelta from tkinter import Canvas, Tk, Button,", "h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos),", "2)), int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))", "255 c_list[index, :, :int(w / 600), :] = 255 c_list[index,", "int(w / 100) self.canvas = 
Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white')", "self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right", "error information for the right hand images') parser.add_argument('-y', '--year', type=int,", "= min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 -", "shown on the right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with mask", "args.report)) if len(imgs_p) != len(dates): raise RuntimeError('Different number of images", "image=self.imagelists[self.curr_img]) # images and buttons self.img_w_f = self.imgs_orig[0].size[0] # full", "delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1])", "= (self.curr_img - 1) % len(self.imgs_orig) self.refresh() def click_wheel(self, event):", "self.end_y = int(self.start_y + self.img_w_f / zoom) if not self.mask_toggle:", "event.delta == 360: self.zoom += 60 else: if self.zoom -", "2 - b_logos), int(h - (self.logo3.height() / 2 + b_logos)),", "self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\",", "imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1", "self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred] self.i_left", "self.b_reset.config(state=NORMAL) def zoomer(self, event): if event.num == 4 or event.delta", "600)):, :, :] = 255 c_list[index, :, :int(w / 600),", "image timeline imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1 imagelist_a", "than days in the report {}!'.format(args.masks, args.report)) root = Tk()", "report 
{}!'.format(args.right, args.report)) if len(imgs_m) != len(dates): raise RuntimeError('Different number", "in {} than days in the report {}!'.format(args.masks, args.report)) root", "[] self.imgs_orig = [] # unmasked full images self.imgs_pred =", "datetime, timedelta from tkinter import Canvas, Tk, Button, RAISED, DISABLED,", "self.img_w_f / zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img]", "imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img", "which are shown on the right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory", "= int(self.start_y + self.img_w_f / zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED)", "imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8') for index in", "c_list = np.array(imagelist_a) c_list[index, :int(w / 600), :, :] =", "on the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which", "self.shift_y) def click_left(self, event): if not self.over_button: self.prev() def click_right(self,", "type=int, default=720, help='window height') args = parser.parse_args() imgs_o = [Image.open(img)", "SUNKEN, NORMAL import numpy as np from PIL import Image,", "self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2", "= 0 # text labels and logos h_logos = int(h", "{}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if self.zoom == 100: self.canvas.itemconfig(self.zoom,", "= self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)", "/ 2 + b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() / 2", 
"self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info,", "self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image =", "[Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]", "Filling Viewer') self.canvas.create_text(w / 3.9, h * 0.19, font=(\"Courier\", int(h", "self.imgs_orig[0].size[0] # full image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS))", "* 0.19, font=(\"Courier\", int(h / 35)), text='Observed') self.canvas.create_text(w - w", "= [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in", "ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos", "{}!'.format(args.left, args.report)) if len(imgs_p) != len(dates): raise RuntimeError('Different number of", "/ 2 + b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() / 2", "zoomer(self, event): if event.num == 4 or event.delta == 120", "self.zoom / 2)) self.refresh() def toggle_mask(self, event=None): self.mask_toggle = not", "event.y, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom", "self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self, event): if event.num ==", "255 c_list[index, (imagelist_h - int(w / 600)):, :, :] =", "(self.curr_img - 1) % len(self.imgs_orig) self.refresh() def click_wheel(self, event): self.start_drag", "def click_left(self, event): if not self.over_button: self.prev() def click_right(self, event):", "in self.imgs_pred] self.i_left = self.canvas.create_image(w / 
3.9, h * 0.56,", "report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year, 1, 1)", "event): if not self.over_button: self.next() def refresh(self): zoom = float(self.zoom)", "the report {}!'.format(args.left, args.report)) if len(imgs_p) != len(dates): raise RuntimeError('Different", "* 0.94, window=self.b_masks) self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset)", "= (event.x + self.shift_x, event.y + self.shift_y) def click_left(self, event):", "days in the report {}!'.format(args.right, args.report)) if len(imgs_m) != len(dates):", "containing date and error information for the right hand images')", "on the right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with mask images')", "of images in {} than days in the report {}!'.format(args.right,", "- estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if self.zoom", "else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1 =", "def drag_roi(self, event): self.shift_x = min(max(self.start_drag[0] - event.x, 0 -", "self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE", "root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next)", "self.zoom += 60 else: if self.zoom - 20 >= 100:", "default=2018, help='year of data acquisition') parser.add_argument('-W', '--width', type=int, default=1280, help='window", "sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',',", "/ 2 - self.img_w_f / zoom / 2) + self.shift_x", "self.zoom = 100 self.shift_x = 0 self.shift_y = 0 self.refresh()", "text labels and logos h_logos = int(h / 17) b_logos", "/ logos[1].size[1] * 
logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos /", "= not self.mask_toggle self.refresh() def reset_transform(self, event=None): self.mask_toggle = False", "self.zoom / 2)), int(self.img_w_f / 2 - self.img_w_f / self.zoom", "= int(self.img_w_f / 2 - self.img_w_f / zoom / 2)", "img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y,", "self.refresh() def reset_transform(self, event=None): self.mask_toggle = False self.zoom = 100", "self.shift_y = 0 self.refresh() def button_enter(self, event): self.over_button = True", "self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] =", "self.imgs_orig = [] # unmasked full images self.imgs_pred = []", "button_leave(self, enter): self.over_button = False def __init__(self, root, w, h,", "int(self.logo1.height() / 2 + b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() /", "self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button = False", "35)), text='Predicted') self.day_info = self.canvas.create_text(w / 2, h * 0.13,", "masked full images self.imgs_pred_m = [] self.imgs_orig = [] #", "for the right hand images') parser.add_argument('-y', '--year', type=int, default=2018, help='year", "self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\",", "+ self.img_w_f / zoom) self.start_y = int(self.img_w_f / 2 -", "Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS))", "img2 = img2.crop((self.start_x, self.start_y, self.end_x, 
self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] =", "import Image, ImageTk import probgf.media as media class MainWindow(): def", "def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors,", "left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which are shown", "= int(self.start_x + self.img_w_f / zoom) self.start_y = int(self.img_w_f /", "False self.zoom = 100 self.shift_x = 0 self.shift_y = 0", "'plus': self.zoom += 20 elif event.delta == 240: self.zoom +=", "with images which are shown on the right') parser.add_argument('-m', '--masks',", "= self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y, self.end_x,", "1 imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8') for index", "self.img_w_f / zoom) self.start_y = int(self.img_w_f / 2 - self.img_w_f", "* logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] *", "timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]", "self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization", "error != 0.0 else 'n.a. 
' for error in report[:,", "self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2", "self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\",", "'--masks', default='imgs/mask/', help='directory with mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report", "self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser =", "2)) self.refresh() def toggle_mask(self, event=None): self.mask_toggle = not self.mask_toggle self.refresh()", "text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30, h *", "'--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which are shown on the", "* 0.12, h * 0.94, font=(\"Courier\", int(h / 50)), text='')", "self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2", "= [] self.imgs_orig = [] # unmasked full images self.imgs_pred", "self.imgs_orig] # images for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS))", "'--width', type=int, default=1280, help='window width') parser.add_argument('-H', '--height', type=int, default=720, help='window", "%d %Y') for day in range(report.shape[0])] errors = ['{:4.1f}'.format(error) if", "# width of each displayed image self.imgs_orig_m = [] #", "root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\",", "self.over_button = False def __init__(self, root, w, h, imgs_p, imgs_o,", "0.94, font=(\"Courier\", 
int(h / 50)), text='') # image timeline imagelist_h", "def click_wheel(self, event): self.start_drag = (event.x + self.shift_x, event.y +", "self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image", "len(imgs_m) != len(dates): raise RuntimeError('Different number of images in {}", "Image.ANTIALIAS)) self.imagelists = [] for index in range(len(self.imgs_pred)): c_list =", "h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos),", "= np.array(imagelist_a) c_list[index, :int(w / 600), :, :] = 255", "50)), text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30, h", "imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1 imagelist_a = np.zeros((len(self.imgs_pred),", "{} than days in the report {}!'.format(args.right, args.report)) if len(imgs_m)", "/ self.zoom / 2)) self.shift_y = min(max(self.start_drag[1] - event.y, 0", "in os.listdir(args.right)])] imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img)", "as np from PIL import Image, ImageTk import probgf.media as", "np from PIL import Image, ImageTk import probgf.media as media", "resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img = 0", "# bind buttons and keys root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\",", "with images which are shown on the left') parser.add_argument('-r', '--right',", "event): self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f /", "each displayed image self.imgs_orig_m = [] # masked full images", "= parser.parse_args() imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img)", "self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\", 
self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter)", "+ timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in", "/ zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2", "root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer)", "text = \"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit = Button(root, font=(\"Courier\",", "self.imagelists = [] for index in range(len(self.imgs_pred)): c_list = np.array(imagelist_a)", "errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. '", "def next(self, event=None): self.curr_img = (self.curr_img + 1) % len(self.imgs_orig)", "import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL import numpy", "self.shift_x, event.y + self.shift_y) def click_left(self, event): if not self.over_button:", "/ 2 - self.img_w_f / self.zoom / 2)) self.refresh() def", "{}!'.format(args.right, args.report)) if len(imgs_m) != len(dates): raise RuntimeError('Different number of", "self.zoom / 2)) self.shift_y = min(max(self.start_drag[1] - event.y, 0 -", "= self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img])", "self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w", "parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory with images which", "* 100, self.errors[self.curr_img])) if self.zoom == 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED)", "/ 2)), int(self.img_w_f / 2 - self.img_w_f / self.zoom /", "h * 0.94, font=(\"Courier\", int(h / 50)), text='') # image", "* 0.56, image=self.imagelists[self.curr_img]) # images and buttons 
self.img_w_f = self.imgs_orig[0].size[0]", "'--year', type=int, default=2018, help='year of data acquisition') parser.add_argument('-W', '--width', type=int,", "min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 - self.img_w_f", "in self.imgs_orig] # images for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w),", "toggle_mask(self, event=None): self.mask_toggle = not self.mask_toggle self.refresh() def reset_transform(self, event=None):", "+ b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1) self.canvas.create_image(int(w -", "if event.num == 4 or event.delta == 120 or event.keysym", "RAISED, DISABLED, SUNKEN, NORMAL import numpy as np from PIL", "parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',", "self.zoomer) root.bind(\"-\", self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave)", "float(self.zoom) / 100 self.start_x = int(self.img_w_f / 2 - self.img_w_f", "self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1", "self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\",", "root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel)", "from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL", "1) % len(self.imgs_orig) self.refresh() def prev(self, event=None): self.curr_img = (self.curr_img", "self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) 
self.canvas.itemconfig(self.day_info, text='{} - cloud cover", "e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev)", "img in self.imgs_orig] # images for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w,", "self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if self.zoom == 100: self.canvas.itemconfig(self.zoom, text='')", "0.13, font=(\"Courier\", int(h / 30)), text='') self.zoom = self.canvas.create_text(w *", "self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1", "/ zoom) self.start_y = int(self.img_w_f / 2 - self.img_w_f /", "self.b_quit = Button(root, font=(\"Courier\", int(h / 50)), text = \"Quit\",", "int(h * 0.68) # width of each displayed image self.imgs_orig_m", "the report {}!'.format(args.right, args.report)) if len(imgs_m) != len(dates): raise RuntimeError('Different", "numpy as np from PIL import Image, ImageTk import probgf.media", "self.shift_y self.end_y = int(self.start_y + self.img_w_f / zoom) if not", "int(w / 600)):, :] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h,", "imgs_m, dates, errors, logos): self.dates = dates self.errors = errors", "= int(self.img_w / len(self.imgs_pred)) + 1 imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h,", "= ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img])", "0.06, font=(\"Courier\", int(h / 25)), text='Gap Filling Viewer') self.canvas.create_text(w /", "else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self, event): if event.num", "RuntimeError('Different number of 
images in {} than days in the", "media.logo2, media.logo3] if len(imgs_o) != len(dates): raise RuntimeError('Different number of", "3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\", int(h", "root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next)", "gap filling results\"\"\" import os import argparse from datetime import", "self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))", "parser.add_argument('-l', '--left', default='imgs/original/', help='directory with images which are shown on", "reset_transform(self, event=None): self.mask_toggle = False self.zoom = 100 self.shift_x =", "+ b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos),", "= Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root,", "in os.listdir(args.left)])] imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img)", "zoom / 2) + self.shift_x self.end_x = int(self.start_x + self.img_w_f", "Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS))", "= ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. 
' for", "20 if self.zoom == 100: self.reset_transform() self.refresh() def drag_roi(self, event):", "unmasked full images self.imgs_pred = [] self.cc = [] for", "elif event.delta == 360: self.zoom += 60 else: if self.zoom", "data acquisition') parser.add_argument('-W', '--width', type=int, default=1280, help='window width') parser.add_argument('-H', '--height',", "logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 + b_logos),", "images for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img", "font=(\"Courier\", int(h / 50)), text='') # image timeline imagelist_h =", "self.end_x = int(self.start_x + self.img_w_f / zoom) self.start_y = int(self.img_w_f", "else: if self.zoom - 20 >= 100: self.zoom -= 20", "event.num == 4 or event.delta == 120 or event.keysym ==", "def prev(self, event=None): self.curr_img = (self.curr_img - 1) % len(self.imgs_orig)", "index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0))", "w / 3.9, h * 0.19, font=(\"Courier\", int(h / 35)),", "0.19, font=(\"Courier\", int(h / 35)), text='Predicted') self.day_info = self.canvas.create_text(w /", "logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]),", "self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\",", "/ logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos /", "root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) 
root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev)", "if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img]", "0 # text labels and logos h_logos = int(h /", "self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3", "self.canvas.create_text(w / 2, h * 0.06, font=(\"Courier\", int(h / 25)),", "for index in range(len(self.imgs_pred)): imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h,", "shown on the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images", "os.listdir(args.left)])] imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for", "in the report {}!'.format(args.right, args.report)) if len(imgs_m) != len(dates): raise", ":, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists = [] for", "/ 50)), text = \"Show masks\", command=self.toggle_mask) self.b_reset = Button(root,", "self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\",", "self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height()", "timeline imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1 imagelist_a =", "= [] # unmasked full images self.imgs_pred = [] self.cc", "PIL import Image, ImageTk import probgf.media as media class MainWindow():", "def click_right(self, event): if not self.over_button: self.next() def refresh(self): zoom", "= [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in", "int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)), int(self.img_w_f", "self.zoom = self.canvas.create_text(w * 0.12, h * 0.94, font=(\"Courier\", int(h", 
"and buttons self.img_w_f = self.imgs_orig[0].size[0] # full image width self.imgs_orig_v", ":] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3)))) self.i_list =", "= ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right,", "self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self, event): if", "setup images self.img_w = int(h * 0.68) # width of", "# text labels and logos h_logos = int(h / 17)", "self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}%", "dates, errors, logos): self.dates = dates self.errors = errors #", "args.height)) MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)", "len(self.imgs_pred)) + 1 imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8')", "self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img]) # images and", "in range(len(self.imgs_pred)): imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS))", "- event.x, 0 - int(self.img_w_f / 2 - self.img_w_f /", "self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred] self.i_left = self.canvas.create_image(w /", "Button(root, font=(\"Courier\", int(h / 50)), text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform()", "# images for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for", "3), dtype='uint8') for index in range(len(self.imgs_pred)): imagelist_a[index, :, :, :]", "* 0.70, h * 0.94, window=self.b_quit) # bind buttons and", "import os import 
argparse from datetime import datetime, timedelta from", "+ self.shift_x self.end_x = int(self.start_x + self.img_w_f / zoom) self.start_y", "DISABLED, SUNKEN, NORMAL import numpy as np from PIL import", "h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w - w /", "self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter)", "= self.imgs_orig[0].size[0] # full image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w),", "self.img_w_f = self.imgs_orig[0].size[0] # full image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w,", "type=int, default=1280, help='window width') parser.add_argument('-H', '--height', type=int, default=720, help='window height')", "width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] *", "self.next() def refresh(self): zoom = float(self.zoom) / 100 self.start_x =", "2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2) self.canvas.create_image(int(w", "' for error in report[:, 5]] logos = [media.logo1, media.logo2,", "for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])] imgs_m", "zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2 =", "self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/',", "self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser = 
argparse.ArgumentParser(description=__doc__)", "in the report {}!'.format(args.masks, args.report)) root = Tk() root.title('Gap Filling", "- b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3)", "and keys root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask)", "MainWindow(): def next(self, event=None): self.curr_img = (self.curr_img + 1) %", "/ 2) + self.shift_y self.end_y = int(self.start_y + self.img_w_f /", "img1 = self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 =", "100: self.zoom -= 20 if self.zoom == 100: self.reset_transform() self.refresh()", "self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l',", "of images in {} than days in the report {}!'.format(args.left,", "Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 +", "* 0.19, font=(\"Courier\", int(h / 35)), text='Predicted') self.day_info = self.canvas.create_text(w", "self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)", "- self.img_w_f / self.zoom / 2)), int(self.img_w_f / 2 -", "= True def button_leave(self, enter): self.over_button = False def __init__(self,", "import probgf.media as media class MainWindow(): def next(self, event=None): self.curr_img", "self.errors = errors # setup images self.img_w = int(h *", "self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() / 2", "/ 2 + b_logos)), image=self.logo3) self.canvas.create_text(w / 2, h *", "c_list[index, :, (imagelist_h - int(w / 600)):, :] = 255", "= 0 self.refresh() def button_enter(self, event): self.over_button = True def", "h * 0.56, 
image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\", int(h /", "int(h / 50)), text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w *", "not self.mask_toggle self.refresh() def reset_transform(self, event=None): self.mask_toggle = False self.zoom", "\"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks)", "img in os.listdir(args.right)])] imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks,", "def button_leave(self, enter): self.over_button = False def __init__(self, root, w,", "b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width()", "/ 100 self.start_x = int(self.img_w_f / 2 - self.img_w_f /", "2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1) self.canvas.create_image(int(w", "len(imgs_p) != len(dates): raise RuntimeError('Different number of images in {}", "root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer)", "img1 = self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y,", "['{:4.1f}'.format(error) if error != 0.0 else 'n.a. 
' for error", "h * 0.94, window=self.b_reset) self.canvas.create_window(w * 0.70, h * 0.94,", "prev(self, event=None): self.curr_img = (self.curr_img - 1) % len(self.imgs_orig) self.refresh()", "img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])] imgs_m =", "logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]),", "next(self, event=None): self.curr_img = (self.curr_img + 1) % len(self.imgs_orig) self.refresh()", "True def button_leave(self, enter): self.over_button = False def __init__(self, root,", "root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\",", "= [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d", "0.12, h * 0.94, font=(\"Courier\", int(h / 50)), text='') #", "* 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w - w / 3.9,", "%Y') for day in range(report.shape[0])] errors = ['{:4.1f}'.format(error) if error", "range(report.shape[0])] errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a.", "of images in {} than days in the report {}!'.format(args.masks,", "image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\", int(h / 50)), text =", "font=(\"Courier\", int(h / 25)), text='Gap Filling Viewer') self.canvas.create_text(w / 3.9,", "tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL import", "font=(\"Courier\", int(h / 50)), text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w", "the right hand images') parser.add_argument('-y', '--year', type=int, default=2018, help='year of", "masks\", command=self.toggle_mask) self.b_reset = Button(root, font=(\"Courier\", int(h / 50)), text", "self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) 
root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\",", ":int(w / 600), :, :] = 255 c_list[index, (imagelist_h -", "event=None): self.curr_img = (self.curr_img + 1) % len(self.imgs_orig) self.refresh() def", "/ 600)):, :] = 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3))))", "+= 20 elif event.delta == 240: self.zoom += 40 elif", "dtype='uint8') for index in range(len(self.imgs_pred)): imagelist_a[index, :, :, :] =", "Button(root, font=(\"Courier\", int(h / 50)), text = \"Reset view\", command=self.reset_transform,", "image self.imgs_orig_m = [] # masked full images self.imgs_pred_m =", "image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h -", "text='Gap Filling Viewer') self.canvas.create_text(w / 3.9, h * 0.19, font=(\"Courier\",", "20 >= 100: self.zoom -= 20 if self.zoom == 100:", "imagelist_h, imagelist_h, 3)))) self.i_list = self.canvas.create_image(w * 0.5, h *", "parser.add_argument('-y', '--year', type=int, default=2018, help='year of data acquisition') parser.add_argument('-W', '--width',", "1)).strftime('%b %d %Y') for day in range(report.shape[0])] errors = ['{:4.1f}'.format(error)", ":, :] = 255 c_list[index, (imagelist_h - int(w / 600)):,", "report {}!'.format(args.left, args.report)) if len(imgs_p) != len(dates): raise RuntimeError('Different number", "timedelta from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN,", "range(len(self.imgs_pred)): c_list = np.array(imagelist_a) c_list[index, :int(w / 600), :, :]", "self.over_button: self.next() def refresh(self): zoom = float(self.zoom) / 100 self.start_x", "help='directory with images which are shown on the left') parser.add_argument('-r',", "2, h * 0.13, font=(\"Courier\", int(h / 30)), text='') self.zoom", "/ self.zoom / 2)), int(self.img_w_f / 2 - self.img_w_f /", "0.94, window=self.b_quit) # bind 
buttons and keys root.bind(\"q\", lambda e:", "min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 - self.img_w_f", "np.array(imagelist_a) c_list[index, :int(w / 600), :, :] = 255 c_list[index,", "Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL import numpy as", "imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists =", "for img in self.imgs_orig] # images for visualization self.imgs_pred_v =", "right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with mask images') parser.add_argument('-R', '--report',", "Image, ImageTk import probgf.media as media class MainWindow(): def next(self,", "image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% -", ":] = 255 c_list[index, (imagelist_h - int(w / 600)):, :,", "argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory with images which are shown", "/ 2 - self.img_w_f / zoom / 2) + self.shift_y", "int(h / 35)), text='Predicted') self.day_info = self.canvas.create_text(w / 2, h", "self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x,", "* logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height()", "self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\", self.button_enter)", "help='window width') parser.add_argument('-H', '--height', type=int, default=720, help='window height') args =", "h * 0.94, window=self.b_quit) # bind buttons and keys root.bind(\"q\",", "information for the right hand images') parser.add_argument('-y', '--year', type=int, default=2018,", "b_logos = int(w / 100) self.canvas = Canvas(root, width=w, height=h)", 
"command=self.toggle_mask) self.b_reset = Button(root, font=(\"Courier\", int(h / 50)), text =", "event.keysym == 'plus': self.zoom += 20 elif event.delta == 240:", "= int(h / 17) b_logos = int(w / 100) self.canvas", "for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])] report", "parser.add_argument('-H', '--height', type=int, default=720, help='window height') args = parser.parse_args() imgs_o", "self.dates = dates self.errors = errors # setup images self.img_w", "+ b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos),", "for img in os.listdir(args.left)])] imgs_p = [Image.open(img) for img in", ":int(w / 600), :] = 255 c_list[index, :, (imagelist_h -", "in report[:, 5]] logos = [media.logo1, media.logo2, media.logo3] if len(imgs_o)", "img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])] imgs_p =", "- w / 3.9, h * 0.19, font=(\"Courier\", int(h /", "/ 35)), text='Observed') self.canvas.create_text(w - w / 3.9, h *", "root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root, args.width, args.height,", "30)), text='') self.zoom = self.canvas.create_text(w * 0.12, h * 0.94,", "= False self.zoom = 100 self.shift_x = 0 self.shift_y =", "Viewer') self.canvas.create_text(w / 3.9, h * 0.19, font=(\"Courier\", int(h /", ":, :int(w / 600), :] = 255 c_list[index, :, (imagelist_h", "event.x, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom", "root.bind(\"-\", self.zoomer) self.over_button = False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\",", "= [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in", "(self.logo3.height() / 2 + b_logos)), image=self.logo3) self.canvas.create_text(w / 2, h", "/ 2)) self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f", "full images self.imgs_pred_m = [] 
self.imgs_orig = [] # unmasked", "= self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img]", "self.mask_toggle = not self.mask_toggle self.refresh() def reset_transform(self, event=None): self.mask_toggle =", "- self.img_w_f / zoom / 2) + self.shift_x self.end_x =", "or event.keysym == 'plus': self.zoom += 20 elif event.delta ==", "= self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img])", "/ 600), :] = 255 c_list[index, :, (imagelist_h - int(w", "0.70, h * 0.94, window=self.b_quit) # bind buttons and keys", "width') parser.add_argument('-H', '--height', type=int, default=720, help='window height') args = parser.parse_args()", "default='imgs/original/', help='directory with images which are shown on the left')", "img) for img in os.listdir(args.left)])] imgs_p = [Image.open(img) for img", "self.refresh() def toggle_mask(self, event=None): self.mask_toggle = not self.mask_toggle self.refresh() def", "event=None): self.curr_img = (self.curr_img - 1) % len(self.imgs_orig) self.refresh() def", "event): if event.num == 4 or event.delta == 120 or", "# full image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for", "{:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if", "int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)) self.shift_y", "alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img = 0 #", "- event.y, 0 - int(self.img_w_f / 2 - self.img_w_f /", "/ 17) b_logos = int(w / 100) self.canvas = Canvas(root,", "os.listdir(args.right)])] imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for", "to interactively view spatio-temporal gap 
filling results\"\"\" import os import", "img in self.imgs_pred] self.i_left = self.canvas.create_image(w / 3.9, h *", "self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img],", "100: self.reset_transform() self.refresh() def drag_roi(self, event): self.shift_x = min(max(self.start_drag[0] -", "default=1280, help='window width') parser.add_argument('-H', '--height', type=int, default=720, help='window height') args", "self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0),", "errors # setup images self.img_w = int(h * 0.68) #", "image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image =", "* 0.13, font=(\"Courier\", int(h / 30)), text='') self.zoom = self.canvas.create_text(w", "with mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date and", "# masked full images self.imgs_pred_m = [] self.imgs_orig = []", "event.delta == 120 or event.keysym == 'plus': self.zoom += 20", "self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos),", "0.30, h * 0.94, window=self.b_masks) self.canvas.create_window(w * 0.50, h *", "results\"\"\" import os import argparse from datetime import datetime, timedelta", "== 120 or event.keysym == 'plus': self.zoom += 20 elif", "self.imgs_orig_m[self.curr_img] img2 = self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w,", "in enumerate(imgs_p): 
self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w,", "enter): self.over_button = False def __init__(self, root, w, h, imgs_p,", "default='report_lin_spatial_clouds0_2.csv', help='report containing date and error information for the right", "images in {} than days in the report {}!'.format(args.right, args.report))", "root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer)", "2 - self.img_w_f / self.zoom / 2)) self.shift_y = min(max(self.start_drag[1]", "mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date and error", "self.zoom += 40 elif event.delta == 360: self.zoom += 60", "self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w,", "int(self.img_w_f / 2 - self.img_w_f / zoom / 2) +", "+ b_logos)), image=self.logo3) self.canvas.create_text(w / 2, h * 0.06, font=(\"Courier\",", "self.imgs_pred] self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img])", "self.errors[self.curr_img])) if self.zoom == 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom,", "def button_enter(self, event): self.over_button = True def button_leave(self, enter): self.over_button", "= ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), 
Image.ANTIALIAS)) self.logo2 =", "img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img]", "self.canvas.create_text(w / 2, h * 0.13, font=(\"Courier\", int(h / 30)),", "logos): self.dates = dates self.errors = errors # setup images", "self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\",", "3.9, h * 0.19, font=(\"Courier\", int(h / 35)), text='Observed') self.canvas.create_text(w", "in the report {}!'.format(args.left, args.report)) if len(imgs_p) != len(dates): raise", "if self.zoom == 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom, text='ZOOM:", "= np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8') for index in range(len(self.imgs_pred)):", "state=DISABLED) self.b_quit = Button(root, font=(\"Courier\", int(h / 50)), text =", "os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year, 1,", "len(self.imgs_orig) self.refresh() def click_wheel(self, event): self.start_drag = (event.x + self.shift_x,", "text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img]", "c_list[index, :, :int(w / 600), :] = 255 c_list[index, :,", "self.reset_transform() self.refresh() def drag_roi(self, event): self.shift_x = min(max(self.start_drag[0] - event.x,", "= 255 c_list[index, :, :int(w / 600), :] = 255", "'--left', default='imgs/original/', help='directory with images which are shown on the", "default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which are shown on the right')", "- int(w / 600)):, :, :] = 255 c_list[index, :,", "images which 
are shown on the right') parser.add_argument('-m', '--masks', default='imgs/mask/',", "imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img", "than days in the report {}!'.format(args.left, args.report)) if len(imgs_p) !=", "images self.img_w = int(h * 0.68) # width of each", "image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w - w / 3.9, h *", "Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root, args.width, args.height, imgs_p,", "cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img]))", "/ 100) self.canvas = Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1", "self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))", "* 0.94, window=self.b_reset) self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit)", "root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi)", "/ 2)) self.refresh() def toggle_mask(self, event=None): self.mask_toggle = not self.mask_toggle", "lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\", self.next) root.bind(\"<Left>\",", "h * 0.19, font=(\"Courier\", int(h / 35)), text='Predicted') self.day_info =", "visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred]", "w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos): self.dates =", "from PIL import Image, ImageTk import 
probgf.media as media class", "or event.delta == 120 or event.keysym == 'plus': self.zoom +=", "self.reset_transform() self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks) self.canvas.create_window(w *", "height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]),", "int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)) self.refresh()", "event): self.start_drag = (event.x + self.shift_x, event.y + self.shift_y) def", "logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() /", "text = \"Show masks\", command=self.toggle_mask) self.b_reset = Button(root, font=(\"Courier\", int(h", "text='') # image timeline imagelist_h = int(self.img_w / len(self.imgs_pred)) +", "[Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]", "self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS))", "(event.x + self.shift_x, event.y + self.shift_y) def click_left(self, event): if", "def zoomer(self, event): if event.num == 4 or event.delta ==", "event=None): self.mask_toggle = False self.zoom = 100 self.shift_x = 0", "= [] for index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0))", "self.img_w_f / self.zoom / 2)), int(self.img_w_f / 2 - self.img_w_f", "0 self.shift_y = 0 self.refresh() def button_enter(self, event): self.over_button =", "for index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w),", "= 0 self.shift_y = 0 self.refresh() def button_enter(self, event): self.over_button", "25)), text='Gap Filling Viewer') self.canvas.create_text(w / 3.9, h 
* 0.19,", "self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks", "self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks) self.canvas.create_window(w * 0.50,", "2)) self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f /", "False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\",", "1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for", "255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3)))) self.i_list = self.canvas.create_image(w *", "font=(\"Courier\", int(h / 30)), text='') self.zoom = self.canvas.create_text(w * 0.12,", "if error != 0.0 else 'n.a. ' for error in", "args.report)) if len(imgs_m) != len(dates): raise RuntimeError('Different number of images", "= np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year, 1, 1) +", "img in os.listdir(args.left)])] imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right,", "window=self.b_reset) self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit) # bind", "1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])] errors", "!= 0.0 else 'n.a. 
' for error in report[:, 5]]", "img) for img in os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]", "== 4 or event.delta == 120 or event.keysym == 'plus':", "= int(h * 0.68) # width of each displayed image", "h * 0.19, font=(\"Courier\", int(h / 35)), text='Observed') self.canvas.create_text(w -", "font=(\"Courier\", int(h / 50)), text = \"Show masks\", command=self.toggle_mask) self.b_reset", "- w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks =", "<gh_stars>1-10 \"\"\"viewer application which allows to interactively view spatio-temporal gap", "for img in os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates", "- b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2) self.canvas.create_image(int(w -", "Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image =", "self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self,", "40 elif event.delta == 360: self.zoom += 60 else: if", "- self.img_w_f / zoom / 2) + self.shift_y self.end_y =", "imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img", "self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit) # bind buttons", "0 self.refresh() def button_enter(self, event): self.over_button = True def button_leave(self,", "imagelist_h, 3), dtype='uint8') for index in range(len(self.imgs_pred)): imagelist_a[index, :, :,", "in os.listdir(args.masks)])] report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year,", "np.array(imgs_m[index]).size) self.curr_img = 0 # text labels and logos h_logos", "{:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def 
zoomer(self, event): if event.num == 4 or", "enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w),", "window=self.b_masks) self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset) self.canvas.create_window(w *", "estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if self.zoom ==", "logos = [media.logo1, media.logo2, media.logo3] if len(imgs_o) != len(dates): raise", "event.y + self.shift_y) def click_left(self, event): if not self.over_button: self.prev()", "== 360: self.zoom += 60 else: if self.zoom - 20", "= 255 c_list[index, :, (imagelist_h - int(w / 600)):, :]", "* 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\", int(h / 50)),", "help='directory with mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date", "/ 50)), text = \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30,", "text='') self.zoom = self.canvas.create_text(w * 0.12, h * 0.94, font=(\"Courier\",", "img2 = self.imgs_pred_m[self.curr_img] img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w),", "self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN)", "imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img", "buttons self.img_w_f = self.imgs_orig[0].size[0] # full image width self.imgs_orig_v =", "* logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] *", "+ self.shift_y 
self.end_y = int(self.start_y + self.img_w_f / zoom) if", "- 1) % len(self.imgs_orig) self.refresh() def click_wheel(self, event): self.start_drag =", "- self.img_w_f / self.zoom / 2)) self.shift_y = min(max(self.start_drag[1] -", "self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory", "* 0.30, h * 0.94, window=self.b_masks) self.canvas.create_window(w * 0.50, h", "self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 + b_logos),", "= False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave)", "MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos) root.focus_set()", "parser.add_argument('-W', '--width', type=int, default=1280, help='window width') parser.add_argument('-H', '--height', type=int, default=720,", "for visualization self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in", "0.94, window=self.b_masks) self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset) self.canvas.create_window(w", "np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8') for index in range(len(self.imgs_pred)): imagelist_a[index,", "[ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for", "self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left',", "image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in", "- int(w / 600)):, :] = 255 
self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h,", "/ 3.9, h * 0.19, font=(\"Courier\", int(h / 35)), text='Observed')", "= np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists = [] for index in", "self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img = 0 # text", "parser.parse_args() imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for", "/ 3.9, h * 0.19, font=(\"Courier\", int(h / 35)), text='Predicted')", "int(h / 50)), text='') # image timeline imagelist_h = int(self.img_w", "self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom /", "images and buttons self.img_w_f = self.imgs_orig[0].size[0] # full image width", "are shown on the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with", "1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day", "2 - self.img_w_f / self.zoom / 2)), int(self.img_w_f / 2", "images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date and error information", "for day in range(report.shape[0])] errors = ['{:4.1f}'.format(error) if error !=", "days in the report {}!'.format(args.masks, args.report)) root = Tk() root.title('Gap", "self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y, self.end_x,", "font=(\"Courier\", int(h / 35)), text='Observed') self.canvas.create_text(w - w / 3.9,", "(self.curr_img + 1) % len(self.imgs_orig) self.refresh() def prev(self, event=None): self.curr_img", "if not self.over_button: self.next() def refresh(self): zoom = float(self.zoom) /", "240: self.zoom += 40 elif event.delta == 360: self.zoom +=", "self.img_w = int(h * 0.68) # width of each displayed", "int(self.img_w / len(self.imgs_pred)) + 1 imagelist_a 
= np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h,", "{}!'.format(args.masks, args.report)) root = Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" %", "/ 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\",", "if not self.over_button: self.prev() def click_right(self, event): if not self.over_button:", "(args.width, args.height)) MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors,", "{} than days in the report {}!'.format(args.masks, args.report)) root =", "self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2", "len(dates): raise RuntimeError('Different number of images in {} than days", "b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h", "ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2", "/ 2, h * 0.06, font=(\"Courier\", int(h / 25)), text='Gap", "= 255 self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3)))) self.i_list = self.canvas.create_image(w", "self.b_quit.bind(\"<Leave>\", self.button_leave) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory with", "2 + b_logos)), image=self.logo3) self.canvas.create_text(w / 2, h * 0.06,", "[] # unmasked full images self.imgs_pred = [] self.cc =", "(imagelist_h - int(w / 600)):, :, :] = 255 c_list[index,", "Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1]", "0.56, image=self.imagelists[self.curr_img]) # images and buttons self.img_w_f = self.imgs_orig[0].size[0] #", "100, 
self.errors[self.curr_img])) if self.zoom == 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else:", "* imagelist_h, imagelist_h, 3)))) self.i_list = self.canvas.create_image(w * 0.5, h", "= [media.logo1, media.logo2, media.logo3] if len(imgs_o) != len(dates): raise RuntimeError('Different", "35)), text='Observed') self.canvas.create_text(w - w / 3.9, h * 0.19,", "dtype=float)[1:-1] dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) -", "- self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() /", "command=self.reset_transform, state=DISABLED) self.b_quit = Button(root, font=(\"Courier\", int(h / 50)), text", "day in range(report.shape[0])] errors = ['{:4.1f}'.format(error) if error != 0.0", "click_wheel(self, event): self.start_drag = (event.x + self.shift_x, event.y + self.shift_y)", "Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root, args.width, args.height, imgs_p, imgs_o,", "root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m,", "self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\",", "self.img_w), resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img =", "'n.a. 
' for error in report[:, 5]] logos = [media.logo1,", "/ 2 - self.img_w_f / self.zoom / 2)) self.shift_y =", "NORMAL import numpy as np from PIL import Image, ImageTk", "100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def", "* 0.5, h * 0.56, image=self.imagelists[self.curr_img]) # images and buttons", "self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width()", "elif event.delta == 240: self.zoom += 40 elif event.delta ==", "= ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS)) self.logo3 =", "= self.canvas.create_text(w * 0.12, h * 0.94, font=(\"Courier\", int(h /", "4 or event.delta == 120 or event.keysym == 'plus': self.zoom", "self.i_right = self.canvas.create_image(w - w / 3.9, h * 0.56,", "parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date and error information for", "\"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit = Button(root, font=(\"Courier\", int(h /", "alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) /", "event): if not self.over_button: self.prev() def click_right(self, event): if not", "/ logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 +", "os import argparse from datetime import datetime, timedelta from tkinter", "- cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] *", "== 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) 
else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL)", "self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\",", "int(self.logo2.height() / 2 + b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() /", "zoom = float(self.zoom) / 100 self.start_x = int(self.img_w_f / 2", "img2 = self.imgs_pred[self.curr_img] else: self.b_masks.config(relief=SUNKEN) img1 = self.imgs_orig_m[self.curr_img] img2 =", "self.b_masks = Button(root, font=(\"Courier\", int(h / 50)), text = \"Show", "not self.over_button: self.prev() def click_right(self, event): if not self.over_button: self.next()", "self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img])", "= self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} - cloud", "img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])] report =", "Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization self.imgs_pred_v", "args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos) root.focus_set() root.mainloop()", "in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])] imgs_p = [Image.open(img)", "False def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates,", "of each displayed image self.imgs_orig_m = [] # masked full", "img) for img in os.listdir(args.right)])] imgs_m = [Image.open(img) for img", "date and error information for the right hand images') parser.add_argument('-y',", "self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img]) #", "self.over_button 
= False self.b_masks.bind(\"<Enter>\", self.button_enter) self.b_masks.bind(\"<Leave>\", self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\",", "img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1],", "images self.imgs_pred_m = [] self.imgs_orig = [] # unmasked full", "ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos", "= errors # setup images self.img_w = int(h * 0.68)", "root.bind(\"<Right>\", self.next) root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right)", "are shown on the right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with", "/ 600), :, :] = 255 c_list[index, (imagelist_h - int(w", "[(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y')", "error in report[:, 5]] logos = [media.logo1, media.logo2, media.logo3] if", "= [] # masked full images self.imgs_pred_m = [] self.imgs_orig", "h * 0.06, font=(\"Courier\", int(h / 25)), text='Gap Filling Viewer')", "- self.img_w_f / self.zoom / 2)) self.refresh() def toggle_mask(self, event=None):", "raise RuntimeError('Different number of images in {} than days in", "probgf.media as media class MainWindow(): def next(self, event=None): self.curr_img =", "60 else: if self.zoom - 20 >= 100: self.zoom -=", "50)), text='') # image timeline imagelist_h = int(self.img_w / len(self.imgs_pred))", "root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left) root.bind(\"<Button-2>\", self.click_wheel) root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer)", "help='window height') 
args = parser.parse_args() imgs_o = [Image.open(img) for img", "help='report containing date and error information for the right hand", "media.logo3] if len(imgs_o) != len(dates): raise RuntimeError('Different number of images", "= dates self.errors = errors # setup images self.img_w =", "window=self.b_quit) # bind buttons and keys root.bind(\"q\", lambda e: self.canvas.master.destroy())", "= Button(root, font=(\"Courier\", int(h / 50)), text = \"Quit\", command=self.canvas.master.destroy)", "self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image", "self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left,", "[] for index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0)) self.imgs_pred.append(img.resize((self.img_w,", "click_left(self, event): if not self.over_button: self.prev() def click_right(self, event): if", "255 c_list[index, :, (imagelist_h - int(w / 600)):, :] =", "\"Show masks\", command=self.toggle_mask) self.b_reset = Button(root, font=(\"Courier\", int(h / 50)),", "self.img_w_f / zoom / 2) + self.shift_x self.end_x = int(self.start_x", "full image width self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img", "for img in self.imgs_pred] self.i_left = self.canvas.create_image(w / 3.9, h", "which are shown on the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory", "= float(self.zoom) / 100 self.start_x = int(self.img_w_f / 2 -", "2, h * 0.06, font=(\"Courier\", int(h / 25)), text='Gap Filling", "+ self.img_w_f / zoom) if not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 =", 
"ImageTk.PhotoImage(img2) self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img]) self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list,", "3)))) self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img])", "/ np.array(imgs_m[index]).size) self.curr_img = 0 # text labels and logos", "* 0.50, h * 0.94, window=self.b_reset) self.canvas.create_window(w * 0.70, h", "self.refresh() def drag_roi(self, event): self.shift_x = min(max(self.start_drag[0] - event.x, 0", "self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w),", "120 or event.keysym == 'plus': self.zoom += 20 elif event.delta", "image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() /", "0.0 else 'n.a. ' for error in report[:, 5]] logos", "/ 2, h * 0.13, font=(\"Courier\", int(h / 30)), text='')", "parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which are shown on", "'--height', type=int, default=720, help='window height') args = parser.parse_args() imgs_o =", "= [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images", "font=(\"Courier\", int(h / 50)), text = \"Reset view\", command=self.reset_transform, state=DISABLED)", "for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])] imgs_p", "report[:, 5]] logos = [media.logo1, media.logo2, media.logo3] if len(imgs_o) !=", "/ 600)):, :, :] = 255 c_list[index, :, :int(w /", "= self.canvas.create_text(w / 2, h * 0.13, font=(\"Courier\", int(h /", "self.zoom += 20 elif event.delta == 240: self.zoom += 40", "0.19, font=(\"Courier\", int(h / 35)), text='Observed') self.canvas.create_text(w - w /", "self.b_masks.bind(\"<Leave>\", 
self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave)", "default='imgs/mask/', help='directory with mask images') parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv', help='report containing", "images in {} than days in the report {}!'.format(args.left, args.report))", "the report {}!'.format(args.masks, args.report)) root = Tk() root.title('Gap Filling Viewer')", "[] self.cc = [] for index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w,", "c_list[index, :int(w / 600), :, :] = 255 c_list[index, (imagelist_h", "root.bind(\"<Button-4>\", self.zoomer) root.bind(\"<Button-5>\", self.zoomer) root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer)", "MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100, self.errors[self.curr_img])) if self.zoom == 100:", "* 0.68) # width of each displayed image self.imgs_orig_m =", "np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img = 0 # text labels and", "/ 2 - b_logos), int(h - (self.logo3.height() / 2 +", "if len(imgs_p) != len(dates): raise RuntimeError('Different number of images in", "in {} than days in the report {}!'.format(args.right, args.report)) if", "self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{}", "c_list[index, (imagelist_h - int(w / 600)):, :, :] = 255", "= self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img]) # images", "h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2", ">= 100: self.zoom -= 20 if self.zoom == 100: 
self.reset_transform()", "+ self.shift_x, event.y + self.shift_y) def click_left(self, event): if not", "= argparse.ArgumentParser(description=__doc__) parser.add_argument('-l', '--left', default='imgs/original/', help='directory with images which are", "root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos): self.dates", ":] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists = [] for index", "h * 0.94, window=self.b_masks) self.canvas.create_window(w * 0.50, h * 0.94,", "for img in os.listdir(args.right)])] imgs_m = [Image.open(img) for img in", "+ self.shift_y) def click_left(self, event): if not self.over_button: self.prev() def", "report {}!'.format(args.masks, args.report)) root = Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\"", "self.refresh() def button_enter(self, event): self.over_button = True def button_leave(self, enter):", "self.button_leave) self.b_reset.bind(\"<Enter>\", self.button_enter) self.b_reset.bind(\"<Leave>\", self.button_leave) self.b_quit.bind(\"<Enter>\", self.button_enter) self.b_quit.bind(\"<Leave>\", self.button_leave) parser", "in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])] imgs_m = [Image.open(img)", "= img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1)", "self.canvas = Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos", "Image.ANTIALIAS) img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img]", "imagelist_h, 3)))) self.i_list = self.canvas.create_image(w * 0.5, h * 0.56,", "spatio-temporal gap filling results\"\"\" import os import argparse from datetime", "text='') self.b_reset.config(state=DISABLED) else: 
self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom)) self.b_reset.config(state=NORMAL) def zoomer(self, event):", "+= 60 else: if self.zoom - 20 >= 100: self.zoom", "'--report', default='report_lin_spatial_clouds0_2.csv', help='report containing date and error information for the", "not self.over_button: self.next() def refresh(self): zoom = float(self.zoom) / 100", "full images self.imgs_pred = [] self.cc = [] for index,", "= [] self.cc = [] for index, img in enumerate(imgs_p):", "= False def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m,", "/ 2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1)", "for index in range(len(self.imgs_pred)): c_list = np.array(imagelist_a) c_list[index, :int(w /", "= [] for index in range(len(self.imgs_pred)): c_list = np.array(imagelist_a) c_list[index,", "{} than days in the report {}!'.format(args.left, args.report)) if len(imgs_p)", "2 - self.img_w_f / self.zoom / 2)) self.refresh() def toggle_mask(self,", "self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size)", "not self.mask_toggle: self.b_masks.config(relief=RAISED) img1 = self.imgs_orig[self.curr_img] img2 = self.imgs_pred[self.curr_img] else:", "images') parser.add_argument('-y', '--year', type=int, default=2018, help='year of data acquisition') parser.add_argument('-W',", "self.prev() def click_right(self, event): if not self.over_button: self.next() def refresh(self):", "resample=0)) self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0),", "interactively view spatio-temporal gap filling results\"\"\" import os import 
argparse", "self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1) self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2)", "self.refresh() def click_wheel(self, event): self.start_drag = (event.x + self.shift_x, event.y", "2 + b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() / 2 -", "image=self.logo3) self.canvas.create_text(w / 2, h * 0.06, font=(\"Courier\", int(h /", "360: self.zoom += 60 else: if self.zoom - 20 >=", ":, :] = 255 c_list[index, :, :int(w / 600), :]", "self.shift_x = 0 self.shift_y = 0 self.refresh() def button_enter(self, event):", "h * 0.56, image=self.imagelists[self.curr_img]) # images and buttons self.img_w_f =", "cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img], self.cc[self.curr_img] * 100,", "logos h_logos = int(h / 17) b_logos = int(w /", "root = Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height))", "view spatio-temporal gap filling results\"\"\" import os import argparse from", "self.canvas.create_text(w * 0.12, h * 0.94, font=(\"Courier\", int(h / 50)),", "args.report)) root = Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width,", "event.delta == 240: self.zoom += 40 elif event.delta == 360:", "self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset) self.canvas.create_window(w * 0.70,", "import numpy as np from PIL import Image, ImageTk import", "dates self.errors = errors # setup images self.img_w = int(h", "2 + b_logos), image=self.logo2) self.canvas.create_image(int(w - self.logo3.width() / 2 -", "number of images in {} than days in the report", ":] = 255 c_list[index, :, :int(w / 600), :] =", "100) self.canvas = Canvas(root, width=w, height=h) self.canvas.pack() self.canvas.configure(background='white') self.logo1 =", "5]] logos = [media.logo1, media.logo2, media.logo3] if len(imgs_o) != len(dates):", 
"len(self.imgs_orig) self.refresh() def prev(self, event=None): self.curr_img = (self.curr_img - 1)", "b_logos)), image=self.logo3) self.canvas.create_text(w / 2, h * 0.06, font=(\"Courier\", int(h", "- 20 >= 100: self.zoom -= 20 if self.zoom ==", "50)), text = \"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit = Button(root,", "- np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size) self.curr_img = 0 # text labels", "root.bind(\"<MouseWheel>\", self.zoomer) root.bind(\"<B2-Motion>\", self.drag_roi) root.bind(\"+\", self.zoomer) root.bind(\"-\", self.zoomer) self.over_button =", "self.imgs_pred = [] self.cc = [] for index, img in", "600), :] = 255 c_list[index, :, (imagelist_h - int(w /", "int(h / 17) b_logos = int(w / 100) self.canvas =", "hand images') parser.add_argument('-y', '--year', type=int, default=2018, help='year of data acquisition')", "image = self.imgs_pred_v[self.curr_img]) self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} -", "/ 25)), text='Gap Filling Viewer') self.canvas.create_text(w / 3.9, h *", "100 self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom", "* 0.06, font=(\"Courier\", int(h / 25)), text='Gap Filling Viewer') self.canvas.create_text(w", "== 'plus': self.zoom += 20 elif event.delta == 240: self.zoom", "self.start_drag = (event.x + self.shift_x, event.y + self.shift_y) def click_left(self,", "/ 50)), text = \"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit =", "and error information for the right hand images') parser.add_argument('-y', '--year',", "sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])] imgs_p = [Image.open(img) for", "in {} than days in the report {}!'.format(args.left, args.report)) if", "which allows to interactively view spatio-temporal gap filling results\"\"\" import", "0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = 
self.canvas.create_image(w - w / 3.9, h", "event): self.over_button = True def button_leave(self, enter): self.over_button = False", "-= 20 if self.zoom == 100: self.reset_transform() self.refresh() def drag_roi(self,", "button_enter(self, event): self.over_button = True def button_leave(self, enter): self.over_button =", "3.9, h * 0.19, font=(\"Courier\", int(h / 35)), text='Predicted') self.day_info", "self.shift_x self.end_x = int(self.start_x + self.img_w_f / zoom) self.start_y =", "= self.imagelists[self.curr_img]) self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated", "np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists = [] for index in range(len(self.imgs_pred)):", "root.bind(\"<Left>\", self.prev) root.bind(\"<Down>\", self.next) root.bind(\"<Up>\", self.prev) root.bind(\"<Button-3>\", self.click_right) root.bind(\"<Button-1>\", self.click_left)", "50)), text = \"Show masks\", command=self.toggle_mask) self.b_reset = Button(root, font=(\"Courier\",", "in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])] report = np.genfromtxt(args.report,", "np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1] dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day,", "= self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right =", "than days in the report {}!'.format(args.right, args.report)) if len(imgs_m) !=", "self.imgs_orig_m = [] # masked full images self.imgs_pred_m = []", "as media class MainWindow(): def next(self, event=None): self.curr_img = (self.curr_img", "w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root,", "1) % len(self.imgs_orig) self.refresh() def click_wheel(self, event): self.start_drag = (event.x", "if len(imgs_o) != len(dates): raise RuntimeError('Different number of images in", "= \"Show masks\", command=self.toggle_mask) self.b_reset = Button(root, 
font=(\"Courier\", int(h /", "self.img_w_f / zoom / 2) + self.shift_y self.end_y = int(self.start_y", "== 240: self.zoom += 40 elif event.delta == 360: self.zoom", "= \"Quit\", command=self.canvas.master.destroy) self.reset_transform() self.canvas.create_window(w * 0.30, h * 0.94,", "bind buttons and keys root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform)", "int(h / 50)), text = \"Reset view\", command=self.reset_transform, state=DISABLED) self.b_quit", "int(self.start_x + self.img_w_f / zoom) self.start_y = int(self.img_w_f / 2", "0.5, h * 0.56, image=self.imagelists[self.curr_img]) # images and buttons self.img_w_f", "+ 1) % len(self.imgs_orig) self.refresh() def prev(self, event=None): self.curr_img =", "/ 30)), text='') self.zoom = self.canvas.create_text(w * 0.12, h *", "[] # masked full images self.imgs_pred_m = [] self.imgs_orig =", "images which are shown on the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',", "imgs_o, imgs_m, dates, errors, logos): self.dates = dates self.errors =", "int(h / 30)), text='') self.zoom = self.canvas.create_text(w * 0.12, h", "= ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS)) self.canvas.create_image(int(self.logo1.width() /", "dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b", "self.canvas.create_text(w / 3.9, h * 0.19, font=(\"Courier\", int(h / 35)),", "int(h / 25)), text='Gap Filling Viewer') self.canvas.create_text(w / 3.9, h", ":, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS)) self.imagelists = []", "/ 50)), text='') # image timeline imagelist_h = int(self.img_w /", "self.img_w), resample=0), alpha=.5)) self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5)) self.cc.append(1 -", "height') 
args = parser.parse_args() imgs_o = [Image.open(img) for img in", "class MainWindow(): def next(self, event=None): self.curr_img = (self.curr_img + 1)", "in range(report.shape[0])] errors = ['{:4.1f}'.format(error) if error != 0.0 else", "self.mask_toggle = False self.zoom = 100 self.shift_x = 0 self.shift_y", "days in the report {}!'.format(args.left, args.report)) if len(imgs_p) != len(dates):", "argparse from datetime import datetime, timedelta from tkinter import Canvas,", "import datetime, timedelta from tkinter import Canvas, Tk, Button, RAISED,", "= int(w / 100) self.canvas = Canvas(root, width=w, height=h) self.canvas.pack()", "0.56, image=self.imgs_pred_v[self.curr_img]) self.b_masks = Button(root, font=(\"Courier\", int(h / 50)), text", "[ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred] self.i_left = self.canvas.create_image(w", "imagelist_h), Image.ANTIALIAS)) self.imagelists = [] for index in range(len(self.imgs_pred)): c_list", ":] = 255 c_list[index, :, (imagelist_h - int(w / 600)):,", "self.cc = [] for index, img in enumerate(imgs_p): self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w),", "600), :, :] = 255 c_list[index, (imagelist_h - int(w /", "the right') parser.add_argument('-m', '--masks', default='imgs/mask/', help='directory with mask images') parser.add_argument('-R',", "self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 + b_logos),", "if len(imgs_m) != len(dates): raise RuntimeError('Different number of images in", "if self.zoom - 20 >= 100: self.zoom -= 20 if", "def reset_transform(self, event=None): self.mask_toggle = False self.zoom = 100 self.shift_x", "self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=(\"Courier\", int(h", "labels and logos h_logos = int(h / 17) b_logos =", "= Button(root, font=(\"Courier\", int(h / 50)), text = \"Reset view\",", "/ 2) + self.shift_x self.end_x = int(self.start_x + self.img_w_f /", 
"3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img]) self.i_right = self.canvas.create_image(w - w", "import argparse from datetime import datetime, timedelta from tkinter import", "the left') parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/', help='directory with images which are", "for error in report[:, 5]] logos = [media.logo1, media.logo2, media.logo3]", "sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])] imgs_m = [Image.open(img) for", "/ 2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2)", "default=720, help='window height') args = parser.parse_args() imgs_o = [Image.open(img) for", "2) + self.shift_x self.end_x = int(self.start_x + self.img_w_f / zoom)", "displayed image self.imgs_orig_m = [] # masked full images self.imgs_pred_m", "ImageTk import probgf.media as media class MainWindow(): def next(self, event=None):", "Tk() root.title('Gap Filling Viewer') root.geometry(\"%dx%d+0+0\" % (args.width, args.height)) MainWindow(root, args.width,", "/ zoom / 2) + self.shift_x self.end_x = int(self.start_x +", "refresh(self): zoom = float(self.zoom) / 100 self.start_x = int(self.img_w_f /", "0.94, window=self.b_reset) self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit) #", "def toggle_mask(self, event=None): self.mask_toggle = not self.mask_toggle self.refresh() def reset_transform(self,", "font=(\"Courier\", int(h / 35)), text='Predicted') self.day_info = self.canvas.create_text(w / 2,", "int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3) self.canvas.create_text(w /", "of data acquisition') parser.add_argument('-W', '--width', type=int, default=1280, help='window width') parser.add_argument('-H',", "self.zoom == 100: self.canvas.itemconfig(self.zoom, text='') self.b_reset.config(state=DISABLED) else: self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom))", "self.zoom - 20 >= 100: self.zoom -= 20 if self.zoom", "100 self.shift_x = 0 
self.shift_y = 0 self.refresh() def button_enter(self,", "= min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 -", "# images and buttons self.img_w_f = self.imgs_orig[0].size[0] # full image", "text='Observed') self.canvas.create_text(w - w / 3.9, h * 0.19, font=(\"Courier\",", "% (args.width, args.height)) MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates,", "args = parser.parse_args() imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left,", "0.68) # width of each displayed image self.imgs_orig_m = []", "zoom) self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom", "Button(root, font=(\"Courier\", int(h / 50)), text = \"Show masks\", command=self.toggle_mask)", "b_logos), image=self.logo1) self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height()", "self.zoom == 100: self.reset_transform() self.refresh() def drag_roi(self, event): self.shift_x =", "self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom /", "- self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 +", "= img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS) img2 = img2.crop((self.start_x,", "index in range(len(self.imgs_pred)): c_list = np.array(imagelist_a) c_list[index, :int(w / 600),", "self.b_reset = Button(root, font=(\"Courier\", int(h / 50)), text = \"Reset", "self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS)) self.logo2", "== 100: self.reset_transform() self.refresh() def drag_roi(self, event): self.shift_x = min(max(self.start_drag[0]", "% len(self.imgs_orig) self.refresh() def click_wheel(self, event): self.start_drag = (event.x +", "type=int, default=2018, help='year of data acquisition') parser.add_argument('-W', '--width', type=int, default=1280,", "help='directory with images which are shown on the right') parser.add_argument('-m',", "help='year of data 
acquisition') parser.add_argument('-W', '--width', type=int, default=1280, help='window width')", "acquisition') parser.add_argument('-W', '--width', type=int, default=1280, help='window width') parser.add_argument('-H', '--height', type=int,", "# setup images self.img_w = int(h * 0.68) # width", "len(imgs_o) != len(dates): raise RuntimeError('Different number of images in {}", "keys root.bind(\"q\", lambda e: self.canvas.master.destroy()) root.bind(\"r\", self.reset_transform) root.bind(\"m\", self.toggle_mask) root.bind(\"<Right>\",", "self.over_button = True def button_leave(self, enter): self.over_button = False def" ]
[ "'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data))", "# We'll have to stub out tests for doExpressCheckoutPayment and", "data) def test_doDirectPayment_valid(self): data = { 'firstname': 'Brave', 'lastname': 'Star',", "class RequestFactory(Client): # Used to generate request objects. def request(self,", "\"\"\"Dummy class for testing PayPalWPP.\"\"\" # responses = { #", "'<PASSWORD>', # 'transactionid': '3TG42202A7335864V', # 'transactiontype': 'expresscheckout', # 'version': '54.0'}", "# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} # # PayPal Response:", "<gh_stars>1-10 #!/usr/bin/python # -*- coding: utf-8 -*- from django.conf import", "'9.95', 'inv': 'inv', 'custom': 'custom', 'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl':", "def test_doDirectPayment_invalid(self): data = { 'firstname': 'Epic', 'lastname': 'Fail', 'street':", "{'ack': 'Success', # 'amt': '10.00', # 'build': '848077', # 'correlationid':", "'Epic', 'lastname': 'Fail', 'street': '100 Georgia St', 'city': 'Vancouver', 'state':", "'instant', # 'pendingreason': 'None', # 'reasoncode': 'None', # 'taxamt': '0.00',", "they're behind paypal's doors. nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\")", "Used to generate request objects. 
def request(self, **request): environ =", "'cvv2': '999', 'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data))", "# 'amt': '10.00', # 'build': '848077', # 'correlationid': '375f4773c3d34', #", "} environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF = RequestFactory() REQUEST =", "RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class for testing", "'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V', # 'transactiontype':", "self.item = { 'amt': '9.95', 'inv': 'inv', 'custom': 'custom', 'next':", "# 'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', #", "from django.core.handlers.wsgi import WSGIRequest from django.forms import ValidationError from django.http", "def testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean,", "# 'token': u'<PASSWORD>'} # # PayPal Response: # {'ack': 'Success',", "# 'paymentstatus': 'Completed', # 'paymenttype': 'instant', # 'pendingreason': 'None', #", "'city': 'Vancouver', 'state': 'BC', 'countrycode': 'CA', 'zip': 'V6V 1V1', 'expdate':", "'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll have to", "paypal.pro.fields import CreditCardField from paypal.pro.helpers import PayPalWPP, PayPalError class RequestFactory(Client):", "generate request objects. 
def request(self, **request): environ = { 'HTTP_COOKIE':", "from django.test.client import Client from paypal.pro.fields import CreditCardField from paypal.pro.helpers", "u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment',", "'None', # 'reasoncode': 'None', # 'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z',", "DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class for testing PayPalWPP.\"\"\" # responses", "real requests at PayPal. self.old_debug = settings.DEBUG settings.DEBUG = True", "return WSGIRequest(environ) RF = RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class", "# because they're behind paypal's doors. nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack", "{ 'firstname': 'Epic', 'lastname': 'Fail', 'street': '100 Georgia St', 'city':", "responses = { # # @@@ Need some reals data", "'lastname': 'Fail', 'street': '100 Georgia St', 'city': 'Vancouver', 'state': 'BC',", "coding: utf-8 -*- from django.conf import settings from django.core.handlers.wsgi import", "'street': '100 Georgia St', 'city': 'Vancouver', 'state': 'BC', 'countrycode': 'CA',", "self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def setUp(self): # Avoding blasting", "self.old_debug def test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)", "'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF", "to stub out tests for doExpressCheckoutPayment and friends # because", "# 'paymenttype': 'instant', # 'pendingreason': 'None', # 'reasoncode': 'None', #", "test_doDirectPayment_invalid(self): data = { 'firstname': 'Epic', 'lastname': 'Fail', 'street': '100", "#!/usr/bin/python # -*- coding: 
utf-8 -*- from django.conf import settings", "'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data = { 'firstname':", "doExpressCheckoutPayment and friends # because they're behind paypal's doors. nvp_obj", "'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver',", "# @@@ Need some reals data here. # \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\",", "} # # def _request(self, data): # return self.responses[\"DoDirectPayment\"] class", "'012019', 'cvv2': '999', 'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item)", "test_setExpressCheckout(self): # We'll have to stub out tests for doExpressCheckoutPayment", "RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy", "\"Success\") ### DoExpressCheckoutPayment # PayPal Request: # {'amt': '10.00', #", "from paypal.pro.helpers import PayPalWPP, PayPalError class RequestFactory(Client): # Used to", "here. 
# \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # # def _request(self,", "1V1', 'expdate': '012019', 'cvv2': '999', 'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress':", "= DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self): data", "St', 'city': 'Vancouver', 'state': 'BC', 'countrycode': 'CA', 'zip': 'V6V 1V1',", "'10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data = { 'firstname': 'Epic',", "# -*- coding: utf-8 -*- from django.conf import settings from", "DoExpressCheckoutPayment # PayPal Request: # {'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',", "from django.conf import settings from django.core.handlers.wsgi import WSGIRequest from django.forms", "stub out tests for doExpressCheckoutPayment and friends # because they're", "data = { 'firstname': 'Brave', 'lastname': 'Star', 'street': '1 Main", "have to stub out tests for doExpressCheckoutPayment and friends #", "'Star', 'street': '1 Main St', 'city': u'San Jos\\xe9', 'state': 'CA',", "# 'pendingreason': 'None', # 'reasoncode': 'None', # 'taxamt': '0.00', #", "# 'reasoncode': 'None', # 'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z', #", "'amt': '9.95', 'inv': 'inv', 'custom': 'custom', 'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/',", "'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF = RequestFactory() REQUEST", "'expdate': '012019', 'cvv2': '999', 'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',}", "'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', 
}", "'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname', # 'method':", "'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll have", "def setUp(self): # Avoding blasting real requests at PayPal. self.old_debug", "\"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # # def _request(self, data): #", "# 'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', #", "settings.DEBUG settings.DEBUG = True self.item = { 'amt': '9.95', 'inv':", "= { 'firstname': 'Brave', 'lastname': 'Star', 'street': '1 Main St',", "Georgia St', 'city': 'Vancouver', 'state': 'BC', 'countrycode': 'CA', 'zip': 'V6V", "'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', # 'paymenttype': 'instant', # 'pendingreason':", "from paypal.pro.fields import CreditCardField from paypal.pro.helpers import PayPalWPP, PayPalError class", "'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return WSGIRequest(environ)", "'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data = { 'firstname':", "'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST)", "self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll have to stub out tests", "'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', 
# 'payerid':", "class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class for testing PayPalWPP.\"\"\" #", "class PayPalWPPTest(TestCase): def setUp(self): # Avoding blasting real requests at", "'2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', # 'paymenttype': 'instant', # 'pendingreason': 'None',", "'zip': '95131', 'expdate': '012019', 'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype': 'visa',", "u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4',", "out tests for doExpressCheckoutPayment and friends # because they're behind", "'firstname': 'Epic', 'lastname': 'Fail', 'street': '100 Georgia St', 'city': 'Vancouver',", "data): # return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def testCreditCardField(self): field =", "'BC', 'countrycode': 'CA', 'zip': 'V6V 1V1', 'expdate': '012019', 'cvv2': '999',", "django.core.handlers.wsgi import WSGIRequest from django.forms import ValidationError from django.http import", "'zip': 'V6V 1V1', 'expdate': '012019', 'cvv2': '999', 'acct': '1234567890', 'creditcardtype':", "'1 Main St', 'city': u'San Jos\\xe9', 'state': 'CA', 'countrycode': 'US',", "= { 'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET',", "u'<PASSWORD>'} # # PayPal Response: # {'ack': 'Success', # 'amt':", "'amt': '10.00', # 'build': '848077', # 'correlationid': '375f4773c3d34', # 'currencycode':", "# def _request(self, data): # return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def", "blasting real requests at PayPal. self.old_debug = settings.DEBUG settings.DEBUG =", "test_doDirectPayment_valid(self): data = { 'firstname': 'Brave', 'lastname': 'Star', 'street': '1", "PayPalError class RequestFactory(Client): # Used to generate request objects. 
def", "'pendingreason': 'None', # 'reasoncode': 'None', # 'taxamt': '0.00', # 'timestamp':", "'USD', # 'feeamt': '0.59', # 'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed',", "data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll have to stub out", "'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF = RequestFactory()", "Need some reals data here. # \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # }", "u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',", "'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self):", "'None', # 'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>',", "REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class", "reals data here. 
# \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # #", "'', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL':", "**request): environ = { 'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '',", "'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', # 'next':", "'848077', # 'correlationid': '375f4773c3d34', # 'currencycode': 'USD', # 'feeamt': '0.59',", "'CA', 'countrycode': 'US', 'zip': '95131', 'expdate': '012019', 'cvv2': '037', 'acct':", "pass # \"\"\"Dummy class for testing PayPalWPP.\"\"\" # responses =", "# 'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', #", "'correlationid': '375f4773c3d34', # 'currencycode': 'USD', # 'feeamt': '0.59', # 'ordertime':", "PayPal. 
self.old_debug = settings.DEBUG settings.DEBUG = True self.item = {", "'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction':", "def _request(self, data): # return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def testCreditCardField(self):", "\"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def setUp(self): # Avoding", "80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF =", "DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self): data =", "== \"Success\") ### DoExpressCheckoutPayment # PayPal Request: # {'amt': '10.00',", "'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token':", "# 'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', #", "= { # # @@@ Need some reals data here.", "'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG", "Request: # {'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1',", "= True self.item = { 'amt': '9.95', 'inv': 'inv', 'custom':", "PayPalWPP, PayPalError class RequestFactory(Client): # Used to generate request objects.", "Avoding blasting real requests at PayPal. 
self.old_debug = settings.DEBUG settings.DEBUG", "'cancelurl': 'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG =", "settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError,", "# 'currencycode': 'USD', # 'feeamt': '0.59', # 'ordertime': '2009-03-04T20:56:08Z', #", "'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST) def", "import settings from django.core.handlers.wsgi import WSGIRequest from django.forms import ValidationError", "= RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass #", "CreditCardField from paypal.pro.helpers import PayPalWPP, PayPalError class RequestFactory(Client): # Used", "data here. # \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # # def", "django.test.client import Client from paypal.pro.fields import CreditCardField from paypal.pro.helpers import", "'Completed', # 'paymenttype': 'instant', # 'pendingreason': 'None', # 'reasoncode': 'None',", "testing PayPalWPP.\"\"\" # responses = { # # @@@ Need", "'2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V', # 'transactiontype': 'expresscheckout',", "tearDown(self): settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'}", "'expdate': '012019', 'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',}", "'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', # 'returnurl':", "{ # # @@@ Need some reals 
data here. #", "# Avoding blasting real requests at PayPal. self.old_debug = settings.DEBUG", "= settings.DEBUG settings.DEBUG = True self.item = { 'amt': '9.95',", "# # @@@ Need some reals data here. # \"DoDirectPayment\":", "= { 'amt': '9.95', 'inv': 'inv', 'custom': 'custom', 'next': 'http://www.example.com/next/',", "St', 'city': u'San Jos\\xe9', 'state': 'CA', 'countrycode': 'US', 'zip': '95131',", "# 'feeamt': '0.59', # 'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', #", "django.test import TestCase from django.test.client import Client from paypal.pro.fields import", "testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')", "paypal's doors. nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment", "'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data =", "environ.update(self.defaults) environ.update(request) return WSGIRequest(environ) RF = RequestFactory() REQUEST = RF.get(\"/pay/\",", "self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data = { 'firstname': 'Epic', 'lastname': 'Fail',", "class for testing PayPalWPP.\"\"\" # responses = { # #", "at PayPal. self.old_debug = settings.DEBUG settings.DEBUG = True self.item =", "doors. 
nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment #", "_request(self, data): # return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def testCreditCardField(self): field", "# return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def testCreditCardField(self): field = CreditCardField()", "# Used to generate request objects. def request(self, **request): environ", "'inv', 'custom': 'custom', 'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' }", "True self.item = { 'amt': '9.95', 'inv': 'inv', 'custom': 'custom',", "def test_setExpressCheckout(self): # We'll have to stub out tests for", "'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG = self.old_debug", "CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def setUp(self): # Avoding blasting real", "Response: # {'ack': 'Success', # 'amt': '10.00', # 'build': '848077',", "PayPalWPP.\"\"\" # responses = { # # @@@ Need some", "'CA', 'zip': 'V6V 1V1', 'expdate': '012019', 'cvv2': '999', 'acct': '1234567890',", "\"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # # def _request(self, data): # return", "TestCase from django.test.client import Client from paypal.pro.fields import CreditCardField from", "data = { 'firstname': 'Epic', 'lastname': 'Fail', 'street': '100 Georgia", "# 'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V', #", "self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment # PayPal Request: #", "'currencycode': 'USD', # 'feeamt': '0.59', # 'ordertime': 
'2009-03-04T20:56:08Z', # 'paymentstatus':", "behind paypal's doors. nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ###", "# 'correlationid': '375f4773c3d34', # 'currencycode': 'USD', # 'feeamt': '0.59', #", "'US', 'zip': '95131', 'expdate': '012019', 'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype':", "# # PayPal Response: # {'ack': 'Success', # 'amt': '10.00',", "# } # # def _request(self, data): # return self.responses[\"DoDirectPayment\"]", "= self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment # PayPal Request:", "self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self):", "# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname', #", "'Brave', 'lastname': 'Star', 'street': '1 Main St', 'city': u'San Jos\\xe9',", "# {'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', #", "'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} # #", "# 'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} #", "class CreditCardFieldTest(TestCase): def testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\")", "'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self):", "field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class", "def request(self, **request): environ = { 'HTTP_COOKIE': 
self.cookies, 'PATH_INFO': '/',", "'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self):", "CreditCardFieldTest(TestCase): def testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError,", "from django.http import QueryDict from django.test import TestCase from django.test.client", "# 'build': '848077', # 'correlationid': '375f4773c3d34', # 'currencycode': 'USD', #", "CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def", "'build': '848077', # 'correlationid': '375f4773c3d34', # 'currencycode': 'USD', # 'feeamt':", "'Fail', 'street': '100 Georgia St', 'city': 'Vancouver', 'state': 'BC', 'countrycode':", "self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data = { 'firstname': 'Brave', 'lastname':", "= self.old_debug def test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment,", "'state': 'CA', 'countrycode': 'US', 'zip': '95131', 'expdate': '012019', 'cvv2': '037',", "ValidationError from django.http import QueryDict from django.test import TestCase from", "import QueryDict from django.test import TestCase from django.test.client import Client", "# \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } # # def _request(self, data):", "django.http import QueryDict from django.test import TestCase from django.test.client import", "import Client from paypal.pro.fields import CreditCardField from 
paypal.pro.helpers import PayPalWPP,", "= CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase):", "{ 'amt': '9.95', 'inv': 'inv', 'custom': 'custom', 'next': 'http://www.example.com/next/', 'returnurl':", "# 'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', # 'paymenttype': 'instant', #", "'countrycode': 'US', 'zip': '95131', 'expdate': '012019', 'cvv2': '037', 'acct': '4797503429879309',", "to generate request objects. def request(self, **request): environ = {", "'0.00', # 'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V',", "data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data = { 'firstname': 'Epic', 'lastname':", "setUp(self): # Avoding blasting real requests at PayPal. self.old_debug =", "'100 Georgia St', 'city': 'Vancouver', 'state': 'BC', 'countrycode': 'CA', 'zip':", "import ValidationError from django.http import QueryDict from django.test import TestCase", "WSGIRequest(environ) RF = RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP):", "{ 'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME':", "def tearDown(self): settings.DEBUG = self.old_debug def test_doDirectPayment_missing_params(self): data = {'firstname':", "'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data = {", "# {'ack': 'Success', # 'amt': '10.00', # 'build': '848077', #", "test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self):", "'inv': 'inv', 'custom': 'custom', 'next': 
'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/'", "'paymentstatus': 'Completed', # 'paymenttype': 'instant', # 'pendingreason': 'None', # 'reasoncode':", "from django.test import TestCase from django.test.client import Client from paypal.pro.fields", "import PayPalWPP, PayPalError class RequestFactory(Client): # Used to generate request", "and friends # because they're behind paypal's doors. nvp_obj =", "self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase): def testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309') self.assertEquals(field.card_type,", "RequestFactory(Client): # Used to generate request objects. def request(self, **request):", "'feeamt': '0.59', # 'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', # 'paymenttype':", "utf-8 -*- from django.conf import settings from django.core.handlers.wsgi import WSGIRequest", "u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} # # PayPal Response: # {'ack':", "'95131', 'expdate': '012019', 'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress':", "import CreditCardField from paypal.pro.helpers import PayPalWPP, PayPalError class RequestFactory(Client): #", "self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def setUp(self): #", "'10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll have to stub", "'10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', # 'inv': u'website-480-cname',", "request(self, **request): environ = { 'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING':", "We'll have to stub out tests for doExpressCheckoutPayment and friends", "for 
doExpressCheckoutPayment and friends # because they're behind paypal's doors.", "'firstname': 'Brave', 'lastname': 'Star', 'street': '1 Main St', 'city': u'San", "'Vancouver', 'state': 'BC', 'countrycode': 'CA', 'zip': 'V6V 1V1', 'expdate': '012019',", "# PayPal Response: # {'ack': 'Success', # 'amt': '10.00', #", "settings.DEBUG = True self.item = { 'amt': '9.95', 'inv': 'inv',", "'reasoncode': 'None', # 'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z', # 'token':", "# PayPal Request: # {'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', #", "RF = RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass", "PayPalWPPTest(TestCase): def setUp(self): # Avoding blasting real requests at PayPal.", "} self.wpp = DummyPayPalWPP(REQUEST) def tearDown(self): settings.DEBUG = self.old_debug def", "environ = { 'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD':", "{ 'firstname': 'Brave', 'lastname': 'Star', 'street': '1 Main St', 'city':", "django.conf import settings from django.core.handlers.wsgi import WSGIRequest from django.forms import", "'custom': 'custom', 'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp", "data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data", "'10.00', # 'build': '848077', # 'correlationid': '375f4773c3d34', # 'currencycode': 'USD',", "paypal.pro.helpers import PayPalWPP, PayPalError class RequestFactory(Client): # Used to generate", "PayPal Request: # {'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom':", "Client from paypal.pro.fields import CreditCardField from paypal.pro.helpers import PayPalWPP, PayPalError", "return self.responses[\"DoDirectPayment\"] class 
CreditCardFieldTest(TestCase): def testCreditCardField(self): field = CreditCardField() field.clean('4797503429879309')", "'countrycode': 'CA', 'zip': 'V6V 1V1', 'expdate': '012019', 'cvv2': '999', 'acct':", "# responses = { # # @@@ Need some reals", "'taxamt': '0.00', # 'timestamp': '2009-03-04T20:56:09Z', # 'token': '<PASSWORD>', # 'transactionid':", "def test_doDirectPayment_valid(self): data = { 'firstname': 'Brave', 'lastname': 'Star', 'street':", "requests at PayPal. self.old_debug = settings.DEBUG settings.DEBUG = True self.item", "self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME':", "-*- from django.conf import settings from django.core.handlers.wsgi import WSGIRequest from", "'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults)", "'1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): #", "@@@ Need some reals data here. # \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", #", "REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class for testing PayPalWPP.\"\"\"", "'999', 'acct': '1234567890', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def", "some reals data here. # \"DoDirectPayment\": \"\"\"ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=\"\"\", # } #", "friends # because they're behind paypal's doors. 
nvp_obj = self.wpp.setExpressCheckout(self.item)", "{'amt': '10.00', # 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'custom': u'website_id=480&cname=1', # 'inv':", "settings from django.core.handlers.wsgi import WSGIRequest from django.forms import ValidationError from", "request objects. def request(self, **request): environ = { 'HTTP_COOKIE': self.cookies,", "# 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', #", "'paymenttype': 'instant', # 'pendingreason': 'None', # 'reasoncode': 'None', # 'taxamt':", "'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1',", "QueryDict from django.test import TestCase from django.test.client import Client from", "u'website_id=480&cname=1', # 'inv': u'website-480-cname', # 'method': 'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',", "### DoExpressCheckoutPayment # PayPal Request: # {'amt': '10.00', # 'cancelurl':", "'012019', 'cvv2': '037', 'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item)", "'V6V 1V1', 'expdate': '012019', 'cvv2': '999', 'acct': '1234567890', 'creditcardtype': 'visa',", "'DoExpressCheckoutPayment', # 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale',", "self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment # PayPal Request: # {'amt':", "django.forms import ValidationError from django.http import QueryDict from django.test import", "'street': '1 Main St', 'city': u'San Jos\\xe9', 'state': 'CA', 'countrycode':", "'city': u'San Jos\\xe9', 'state': 'CA', 'countrycode': 'US', 'zip': '95131', 'expdate':", "'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V', # 'transactiontype': 'expresscheckout', # 'version':", "# 'next': 
u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'payerid': u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', #", "{'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data = {", "for testing PayPalWPP.\"\"\" # responses = { # # @@@", "'Success', # 'amt': '10.00', # 'build': '848077', # 'correlationid': '375f4773c3d34',", "'/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT':", "u'San Jos\\xe9', 'state': 'CA', 'countrycode': 'US', 'zip': '95131', 'expdate': '012019',", "tests for doExpressCheckoutPayment and friends # because they're behind paypal's", "'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} # # PayPal Response: #", "'4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def test_doDirectPayment_invalid(self): data", "# \"\"\"Dummy class for testing PayPalWPP.\"\"\" # responses = {", "Main St', 'city': u'San Jos\\xe9', 'state': 'CA', 'countrycode': 'US', 'zip':", "= { 'firstname': 'Epic', 'lastname': 'Fail', 'street': '100 Georgia St',", "'state': 'BC', 'countrycode': 'CA', 'zip': 'V6V 1V1', 'expdate': '012019', 'cvv2':", "objects. def request(self, **request): environ = { 'HTTP_COOKIE': self.cookies, 'PATH_INFO':", "'token': u'<PASSWORD>'} # # PayPal Response: # {'ack': 'Success', #", "= {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data =", "from django.forms import ValidationError from django.http import QueryDict from django.test", "'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertFalse(self.wpp.doDirectPayment(data)) def test_setExpressCheckout(self): # We'll", "because they're behind paypal's doors. 
nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack ==", "'0.59', # 'ordertime': '2009-03-04T20:56:08Z', # 'paymentstatus': 'Completed', # 'paymenttype': 'instant',", "self.old_debug = settings.DEBUG settings.DEBUG = True self.item = { 'amt':", "self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def test_doDirectPayment_valid(self): data = { 'firstname': 'Brave',", "environ.update(request) return WSGIRequest(environ) RF = RequestFactory() REQUEST = RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\")", "'HTTP_COOKIE': self.cookies, 'PATH_INFO': '/', 'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '',", "def test_doDirectPayment_missing_params(self): data = {'firstname': 'Chewbacca'} self.assertRaises(PayPalError, self.wpp.doDirectPayment, data) def", "Jos\\xe9', 'state': 'CA', 'countrycode': 'US', 'zip': '95131', 'expdate': '012019', 'cvv2':", "u'BN5JZ2V7MLEV4', # 'paymentaction': 'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'}", "'375f4773c3d34', # 'currencycode': 'USD', # 'feeamt': '0.59', # 'ordertime': '2009-03-04T20:56:08Z',", "'custom', 'next': 'http://www.example.com/next/', 'returnurl': 'http://www.example.com/pay/', 'cancelurl': 'http://www.example.com/cancel/' } self.wpp =", "'QUERY_STRING': '', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80,", "nvp_obj = self.wpp.setExpressCheckout(self.item) self.assertTrue(nvp_obj.ack == \"Success\") ### DoExpressCheckoutPayment # PayPal", "'1234567890123455') class PayPalWPPTest(TestCase): def setUp(self): # Avoding blasting real requests", "import WSGIRequest from django.forms import ValidationError from django.http import QueryDict", "'Sale', # 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname', # 'token': u'<PASSWORD>'} # # PayPal", "WSGIRequest from django.forms import ValidationError from django.http import QueryDict from", "import TestCase 
from django.test.client import Client from paypal.pro.fields import CreditCardField", "# # def _request(self, data): # return self.responses[\"DoDirectPayment\"] class CreditCardFieldTest(TestCase):", "'037', 'acct': '4797503429879309', 'creditcardtype': 'visa', 'ipaddress': '10.0.1.199',} data.update(self.item) self.assertTrue(self.wpp.doDirectPayment(data)) def", "-*- coding: utf-8 -*- from django.conf import settings from django.core.handlers.wsgi", "field.clean('4797503429879309') self.assertEquals(field.card_type, \"Visa\") self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455') class PayPalWPPTest(TestCase): def setUp(self):", "= RF.get(\"/pay/\", REMOTE_ADDR=\"127.0.0.1:8000\") class DummyPayPalWPP(PayPalWPP): pass # \"\"\"Dummy class for", "# 'token': '<PASSWORD>', # 'transactionid': '3TG42202A7335864V', # 'transactiontype': 'expresscheckout', #", "PayPal Response: # {'ack': 'Success', # 'amt': '10.00', # 'build':", "'lastname': 'Star', 'street': '1 Main St', 'city': u'San Jos\\xe9', 'state':", "'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request) return", "'', 'SERVER_NAME': 'testserver', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', } environ.update(self.defaults) environ.update(request)" ]
[ "coding: utf-8 -*- \"\"\" Created on Mon Dec 7 19:46:40", "on Mon Dec 7 19:46:40 2020 @author: Intel \"\"\" def", "displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i in range(n): if 'p' in", "if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1", "me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break m = int(input()) grid", "2020 @author: Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i", "Mon Dec 7 19:46:40 2020 @author: Intel \"\"\" def displayPathtoPrincess(n,grid):", "j in range(n): if 'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i) |", "m = int(input()) grid = [] for i in range(0,", "in range(n): if 'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i) | (me_j!=pe_j)):", "elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break m = int(input()) grid =", "print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break m = int(input())", "for j in range(n): if 'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i)", "7 19:46:40 2020 @author: Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2", "i in range(n): if 'p' in grid[i]: pe_i=i for j", "print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0):", "'p' in grid[i]: pe_i=i for j in range(n): if 'p'==grid[i][j]:", "if 'p' in grid[i]: pe_i=i for j in range(n): if", "Dec 7 19:46:40 2020 @author: Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2", "grid[i]: pe_i=i for j in range(n): if 'p'==grid[i][j]: pe_j=j break", "if 'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN')", "break while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1", "= int(input()) grid = [] for i in range(0, m):", "int(input()) grid = [] for i in range(0, m): 
grid.append(input().strip())", "print('RIGHT') me_j=me_j+1 else: break m = int(input()) grid = []", "me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break", "print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else:", "break m = int(input()) grid = [] for i in", "range(n): if 'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0):", "\"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i in range(n): if", "me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT')", "19:46:40 2020 @author: Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for", "(me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT')", "for i in range(n): if 'p' in grid[i]: pe_i=i for", "grid = [] for i in range(0, m): grid.append(input().strip()) displayPathtoPrincess(m,grid)", "'p'==grid[i][j]: pe_j=j break break while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Dec", "if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break m =", "-*- coding: utf-8 -*- \"\"\" Created on Mon Dec 7", "\"\"\" Created on Mon Dec 7 19:46:40 2020 @author: Intel", "elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1", "pe_j=j break break while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0):", "utf-8 -*- \"\"\" Created on Mon Dec 7 19:46:40 2020", "else: break m = int(input()) grid = [] for i", "else: if(me_j-pe_j>0): print('LEFT') me_j=me_j-1 elif(me_j-pe_j<0): print('RIGHT') me_j=me_j+1 else: break m", "break break 
while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP')", "-*- \"\"\" Created on Mon Dec 7 19:46:40 2020 @author:", "def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i in range(n): if 'p'", "while((me_i!=pe_i) | (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else:", "Created on Mon Dec 7 19:46:40 2020 @author: Intel \"\"\"", "me_j=n//2 for i in range(n): if 'p' in grid[i]: pe_i=i", "in grid[i]: pe_i=i for j in range(n): if 'p'==grid[i][j]: pe_j=j", "pe_i=i for j in range(n): if 'p'==grid[i][j]: pe_j=j break break", "Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i in range(n):", "| (me_j!=pe_j)): if(me_i-pe_i<0): print('DOWN') me_i=me_i+1 elif(me_i-pe_i>0): print('UP') me_i=me_i-1 else: if(me_j-pe_j>0):", "range(n): if 'p' in grid[i]: pe_i=i for j in range(n):", "@author: Intel \"\"\" def displayPathtoPrincess(n,grid): me_i=n//2 me_j=n//2 for i in", "me_j=me_j+1 else: break m = int(input()) grid = [] for", "in range(n): if 'p' in grid[i]: pe_i=i for j in", "me_i=n//2 me_j=n//2 for i in range(n): if 'p' in grid[i]:" ]
[ "# Plot average methylation values per bin # Define Colormap", "list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name, :]", "path = Path(vertices, codes) patch = PathPatch(path, facecolor = \"None\",", "ticks[i]], [.3, .0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\",", "str, optional :param color_neutral: Plot color of copy number neutral", "open(input_filename, \"r\") cnv_bed_list = [] ploidy = None for line", "to be plotted. :type start_r: int :param end_r: End position", ":type y_max: bool, optional :param distance_ratio: Minimal distance between two", "color=\"#cbebc4\", edgecolor=False, alpha=1, ax = None): '''Functions that plots genomic", "def plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\",", "+= [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border", "plotted as bar :type chip_signals: iterator :param r_chrom: Chromosome of", "tick_labels.reverse() print(tick_labels) for i in range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i],", "samples expression. The number of colors must be the same", "[<chrom>, <start>, <end>]) :type genomic_segments: list :param start: Start position", "is plotted in addition to the boxplot, no points are", "= color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none',", ":]) expression_values = exp_values if(log_transformed): expression_values = np.log2([i if i", "segment_end-segment_start, 1, color=color) ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start), int(end)) plt.ylim(0,", "1. Chromosome 2. Start postion 3. End position 4. Value", "exons_bed: :class:`pybedtools.BedTool` object containing exons of genes. 
:type exons_bed: :class:`pybedtools.BedTool`", "int(region_bed[0][1]) # Determine minimal extension of barplot extension=None for i", "ax is not None else plt.gca() genes_in_region = genes_bed exons_in_region", "continue n_meth = int(element[3]) n_unmeth = int(element[4]) current_bin = int((position-start)/bin_size)", "vmax=None, location=\"top\", ax=None): '''Function that plots HiC contact maps as", "If \"up\", plot ticks to upper direction, else if \"down\",", "loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio):", ".4, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect = Rectangle((current_start,", "contact_map: Matrix that contains the intensity values of HiC contacts.", "ax if ax is not None else plt.gca() c =", "is the max number of stacked genes, default is None.", "End position of the region to be plotted. :type end:", "#ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([], []) plt.xlim([start_r, end_r])", "groups. :type groups: list :param gene_names_map: Dictionary with keys: ENSEMBL", ":type input_filename: str :return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq", "(downstream), defaults to \"left\". 
:type direction: str, optional :param color:", "distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting", "distance = abs(link_pos2-link_pos1) if(distance > max_dist): max_dist = distance mid_point", "direction=\"top_down\", color=\"k\", ax = None): '''Function that plots arcs from", "plt.ylim([-.1, .8]) else: plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False)", "region to be plotted :type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe`", "import math def plotGenes(genes_bed, exons_bed, introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True,", "plotting, defaults to \"normal\". :type g2_id: str, optional :param plot_gene_names:", "= 0 left = [] height = [] for signal", "be considered as a CNV, defaults to 0.7. :type cnv_threshold:", "plot_gene_names=True): '''Function for plotting paired gene expression (e.g. 
tumor and", "segments, and values patch :rtype: dict ''' ax = ax", "== 4): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", "= [] patch_description_list = [] met_forward = False met_reverse =", "= gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not blacklist is None and", "samples (columns: sample ids; index: gene ids) :type expression_df_g1: :class:`pandas.DataFrame`", "start, end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that", "tcn-.1), current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot", "Introns for i in introns_in_region: start = int(i[1]) end =", "= [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 1), (region_mid_point,", "= color_minus dx = -1.*head_length plt.arrow(arrow_start, .5, dx, 0, head_width=head_width,", "print(ploidy) if(line[0] == \"#\" or line[:5] == \"chrom\"): continue split_line", "= [] vertices = [] if(direction == \"top_down\"): codes =", "for plotting (R,G,B). :type segments_Tabix_filename: str :param chrom: Chromosome of", "region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i in", "if len(average_list) > 0 else 0. ] binned_average_meth = binned_average_meth_no_missing", ":type plot_gene_names: bool. :return: Axis on which plot was placed.", "is the list of patches drawn on the ax. 3.", "that creates a mapping between gene ids :param gene_name_mapping_file: Path", "End, Methylated Cs, Unmethylated Cs. :type methylation_bed: :class:`pybedtools.BedTool` :param chrom:", "to None :type vmin: float, optional :param vmax: Maximal value", "be plotted. 
:type start_r: int :param end_r: End position of", "= color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2), end-start,", ":type max_dev: float, optional :param ax: Axis used for plotting,", "is a list-ike object containing: 1. Chromosome 2. Start postion", "else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in y_range: # Define midpoint", "arrow_end = motif_start color = color_minus dx = -1.*head_length plt.arrow(arrow_start,", "ids: IDs used for legend plotting, defaults to None. Number", "else 0. ] binned_average_meth = binned_average_meth_no_missing # Plot average methylation", "= int(motif[1]) motif_end = int(motif[2]) strand = str(motif[3]) arrow_start =", "Set containing gene ids not to be plotted, defaults to", "name :type gene_name_mapping_file: str :return: Dictionary containing the gene id", "] ticks.reverse() tick_labels.reverse() print(tick_labels) for i in range(len(ticks)): if(loc_coordinates ==", "of barplot extension=None for i in range(len(gene_regions)): if(not blacklist is", "plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[4]) for", ":param color: Color of the rectangles representing the regions to", "\"up\"): plt.ylim([-.1, .8]) else: plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False)", "plot is created. :type region_bed: :class:`pybedtools.BedTool` :param blacklist: List of", "region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter(", "= i y_level_dict[i] += [[gene_start, gene_end]] break elif(i == max_y_pos):", "plot_legend: bool, optional :param colors: List of colors used for", "to be plotted, defaults to 1. 
:type alpha: float, optional.", "plots TF motifs as arrows, indicating their directionality. :param motifs_bed:", "plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size = 10**int((np.log10((end-start)/10))) #", "side. If this ratio is underwent, the genes will be", "= left_border + extension/4. bplot_g2_pos = left_border + 3*(extension/4.) tick_positions", "digits_to_round = None divisor = None if(scale == \"Mb\"): digits_to_round", "None else plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev is None): max_dev", "ticks[i]], [0., .3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\",", "the genomic scales elements, defaults to \"k\". :type color: str,", "Nothing to be returned. :rtype: None ''' ax = ax", "str, optional :param edge_color: Color of region edge. If False,", "else plt.gca() TX_start = TX_pos TX_end = end_r if(direction ==", "{\"color\": \"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list =", "stay in increasing order, defaults to False. :type revert_coordinates: bool,", "is None): vmax = np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap) for", "[] for i in range(len(left)): if(i % merge == 0", "-1.*cnv_threshold): color = color_loss if(abs(ploidy_dev) > cnv_threshold): rect = Rectangle((current_start,", "\"#\" or line[:5] == \"chrom\"): continue split_line = line.rstrip().split(\"\\t\") ploidy_dev", "distances. :param genomic_segments: List of segments for which distances shall", "str, optional. :param ax: Axis on which to plot, defaults", "ids must be the same as the number of groups.", ":param head_width: Width of the arrow head as proportion of", "distance), (link_pos2, 0)] codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] path =", "float, optional :param ax: Axes instance on which the genes", "id, and the second column is the HUGO gene name", ":return: Nothing to be returned. :rtype: None ''' ax =", "to 0. 
:type merge: int, optional :return: Nothing to be", "plt import pybedtools import pandas as pnd import numpy as", "to be plotted. :type start: int :param end: End position", "(midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5)", "[[gene_start, gene_end]] break elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio):", "distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i] += [[gene_start, gene_end]] break elif(i", "= open(gene_name_mapping_filename, \"r\") gene_map = {} for line in gene_name_mapping_file:", "values per bin # Define Colormap cmap = cm.bwr norm", "to False. :type edge_color: str, optional :param alpha: Alpha value", "Axes instance on which the genes are plotted, default is", "= [(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else:", ":param chrom: Chromosome of the region to be plotted. :type", "+= [left[i]] if(not i % merge == 0): left_merged +=", "ax is not None else plt.gca() TX_start = TX_pos TX_end", "\"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in y_range: # Define", "elif(ploidy_dev <= -1.*cnv_threshold): color = color_loss if(abs(ploidy_dev) > cnv_threshold): rect", "list of descriptions for the patches \\ drawn on the", "None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True plot", "= color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03), end-start,", "is of the form [<chrom>, <start>, <end>]) :type genomic_segments: list", "[] height_merged += [np.mean(heights)] heights = [] heights += [height[i]]", "samples expression, defaults to \"#fb8072\". :type color_g1: str, optional :param", "genomic coordinates in a linea fashion. :param chrom: Chromosome of", "start: int :param end: End position of the region to", ".3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = [] patch_description_list =", "region to be plotted. :type r_end: int :param ax: Axis", "ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list,", "positiont of the region to be plotted. :type end_r: int", "ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups, gene_names_map=None, blacklist=None, ax=None,", "int :param r_end: End position of region to be plotted.", "= int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance > max_dist): max_dist =", "in range(max_y_pos+1): if(i == 0 and not max_y_pos in y_level_dict):", ":param color_g1: Color used for plotting g1 samples expression, defaults", "for plotting CNV segments :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs", "else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [ (bplot_pos+", "loc='lower left') return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups,", "contact_map.iloc[i, j] intensity_value = (intensity_value/vmax if intensity_value <= vmax else", "plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\", ax=None): '''Function", "axis for plotting ax = ax if ax is not", 
"plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\", ax=None): '''Function that plots methylation", "else plt.gca() genes_in_region = genes_bed exons_in_region = exons_bed introns_in_region =", "''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map = {} for line", "be plotted ([<chrom>, <start>, <end>]). :type region: list :param groups:", "region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i in region]), from_string=True) # Get", "True :type plot_gene_ids: bool, optional :param y_max: Max y value", "[gene_name] exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values =", "> max_dev): tcn = max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) <", "to be plotted. :type end_r: int :param color: Color of", "plotted. :type start: str :param end: End position of the", "> 0 else 0. ] binned_average_meth = binned_average_meth_no_missing # Plot", "[] offset = merge*offset left = left_merged height = height_merged", "plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates == \"up\"): plt.ylim([-.1, .8]) else:", ":type start: int :param end: Chromosomal end position of region", "end], [0, 0], linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start, end], [0.3,", "[patch] patch_description_list += [\"forward strand\"] met_forward = True elif(strand ==", "plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start, end, segment_size, cmap=\"Greys\", vmin=None, vmax=None,", "chromosome. :type start: int :param end: End position on chromosome.", "tuple([ float(i)/256. for i in str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3])", "def plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None):", "height_merged plt.bar(left, height, offset, color = color, edgecolor = color)", ":rtype: list \"\"\" ax = ax if ax is not", "[left_border + extension/2.] 
gene_names_clean += [gene_name] exp_values_g1 = expression_df_g1.loc[gene_name, :]", "max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color = color) gene_name =", "0), (region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path = Path(vertices,", "HUGO gene name :type gene_name_mapping_file: str :return: Dictionary containing the", "tick_positions = [] gene_names_clean = [] counter=0 for gene_name in", "groups. :type ids: list, optional. :param plot_gene_names: True if gene", "of g2 used for legend plotting, defaults to \"normal\". :type", "color=color_plus dx = head_length if(strand == \"-\"): arrow_start = motif_end", "cmap=\"bwr\", max_dev=None, ax=None): '''Function for plotting CNV segments as heatmap", "color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2), end-start, .4,", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary with keys = names of segments,", "list :param start: Start position of the genomic region. :type", "= None if(i < len(gene_names)-1): right_border = gene_regions[i+1][0] else: right_border", "else plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map =", "in segments_list: segment_start = int(segment[1]) segment_end = int(segment[2]) color =", "int(interval[1]) gene_end = int(interval[2]) for i in range(max_y_pos+1): if(i ==", "for i in range(int(((end-start)/bin_size)+1)) ] counter = 0 for element", "less ticks, else if False make more ticks. 
:type upper:", "genes_in_region: start = int(i[1]) gene_name = str(i[3]) if(not blacklist is", "files and converts them to pybedtools.BedTool object :param input_filename: Full", "range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes = []", "{} for segment in segments_list: segment_start = int(segment[1]) segment_end =", "plt.cm.get_cmap(cmap) if(max_dev is None): max_dev = max([abs(float(i[3])) for i in", "expression_df, groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\",", "to True. :type plot_gene_names: bool, optional :param position_gene_names: Either of", "= [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ] counter", "left_border = gene_regions[counter][0] right_border = region_right_border if(not blacklist is None", "[]) if(loc_coordinates == \"up\"): plt.ylim([-.1, .8]) else: plt.ylim([-1.5, .3]) plt.xticks([],", ":param ax: Axis used for plotting, defaults to None. :type", "to be plotted. :type r_start: int :param r_end: End position", "= ax if ax is not None else plt.gca() max_dist", "in genes_in_region_bed: gene_name_ens = str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions +=", "from matplotlib.path import Path from matplotlib.patches import PathPatch import matplotlib.cm", "\"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = [] patch_description_list", "4): plt.ylim([0, 6.5]) plt.yticks([0, 2, 4, 6], [\"0\", \"2\", \"4\",", "[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 0), (region_mid_point, .2),", "region to be plotted. :type chrom_r: str :param start_r: Chromosomal", ":param ax: Axis where the plot is drawn, defaults to", "0): left_merged += [lefts[0]] lefts = [] height_merged += [np.mean(heights)]", "with the following entries: 1. Chromsome 2. 
Start position 3.", "not None else plt.gca() # Calculate midpoints of original and", "hugo_gene_symbol gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map,", "ax if ax is not None else plt.gca() region_bed =", "\"3\", \"4\"], size=6) elif(ploidy == 4): plt.ylim([0, 6.5]) plt.yticks([0, 2,", "== 0 and not (i == 0)): left_merged += [lefts[0]]", "colors is None): color = colors[g] else: color = standard_colors[g]", "j for j in [meth_before, meth_after] if not j ==", ":class:`pybedtools.BedTool` object containing regions of the TF sited to be", "None): max_y_pos = y_max # Plot Exons for i in", "range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start,", "the same as the number of groups. :type ids: list,", "to be considered as a CNV, defaults to 0.7. :type", "region_right_border current_extension = right_border-left_border if(current_extension == 0.): continue if(extension is", "int(i[1]) end = int(i[2]) gene_name = str(i[3]) if(not blacklist is", "optional. :param ax: Axis where the plot is drawn, defaults", "None patch_description_list = None tick_positions = [] gene_names_clean = []", "\"Mb\"): digits_to_round = int(6-np.log10(tick_size)) divisor = 1000000 else: digits_to_round =", "def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\",", "defaults to \"#80b1d3\". :type color_plus: str, optional :param color_minus: Color", "strand, start, end, color). The color field is used to", "[] gene_regions = [] for e in genes_in_region_bed: gene_name_ens =", "be plotted. 
:type motifs_bed: :class:`pybedtools.BedTool` :param start: Start position of", "0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for i in range(1, n_segments+1):", "\"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = []", "\"-\"): arrow_start = motif_end arrow_end = motif_start color = color_minus", "containing the IDs of the different groups. :type groups: list", "list-like elements with the following entries: 1. Chromosome 2. Start", "None) Axis used for plotting, defaults to None. :type ax:", "int(segment[1]) segment_end = int(segment[2]) color = tuple([ float(i)/256. for i", "[\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax =", "Direction of the genomic part that is translocated. Either of", "for i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist = True, boxprops=boxprops,", "str(i[5]) color = color_forward if(strand == \"-\"): color = color_reverse", "genes are plotted, default is None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "from ploidy to plot, defaults to None. :type max_dev: float,", ":param start: Start position on chromosome. :type start: int :param", "chip_signals: iterator :param r_chrom: Chromosome of region to be plotted.", "vertices = [(link_pos1, 0), (mid_point, distance), (link_pos2, 0)] codes =", "regions: Iterator containig list-like elements with the following entries: 1.", "[1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2, 2], color=color_threshold,", "position of the genomic region. :type end: int :param color:", "used for plotting HiC intensities, defaults to \"Greys\". :type cmap:", "center positions of genes. :type gene_mid_points: list :param region: List", "plotting (R,G,B). :type segments_Tabix_filename: str :param chrom: Chromosome of the", "intensity_value <= vmax else 1.) 
facecolor = colormap(intensity_value) patch =", ":param cnv_threshold: Minimal deviation from ploidy to be considered as", "color = color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold):", "digits_to_round = int(5-np.log10(tick_size)) divisor = 100000 tick_labels = [ str(round(i/float(divisor),", "else 1. for i in exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos],", "that contains the intensity values of HiC contacts. :type contact_map:", "for plotting genomix segments in different colors :param segments_tabix_filename: Path", "float(m[4]) for m in m in meth_calls], color=color, marker=\".\", linestyle='None',", ":return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq :rtype: :class:`pybedtools.BedTool` '''", "if location == \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in", "\"right\" (downstream), defaults to \"left\". :type direction: str, optional :param", "extension of barplot extension=None if(len(gene_mid_points) <= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0]", "+= [bplot[\"boxes\"][0]] patch_description_list += [g_id] counter += 1 ax.set_xlim(region_left_border, region_right_border)", "end, bin_size=1000, ax = None): '''Function for plotting methylation values", "max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color = color) plt.xlim([region_border_up, region_border_down])", "on the region that is plotted), defaults to 1000. :type", "Dictionary containing the gene id mapping. :rtype: dictionary ''' gene_name_mapping_file", "be plotted. :type r_chrom: str :param r_start: Start position of", "legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting gene structures, i.e.", "are plotted side by side. 
If this ratio is underwent,", "color_g2: str, optional :param g1_id: ID of g1 used for", "for region in regions: if(not edgecolor): current_color = color rect", "on the plot, default is None :type blacklist: list, optional", "stranded genes, default is \"#80b1d3\". :type color_plus: str, optional. :param", "be plotted. :type genes_bed: :class:`pybedtools.BedTool` :param region_size: Size of region", "''' # Use given axis for plotting ax = ax", "0.1. :type distance_ratio: float, optional :param ax: Axes instance on", "If this ratio is underwent, the genes will be stacked.", "= region_border_down-region_border_up color_forward = color_plus color_reverse = color_minus max_y_pos =", "str, optional :param vmin: Minimal value of intensity range to", "0.7. :type cnv_threshold: float, optional :param color_gain: Plot color of", "to \"Greys\". :type cmap: str, optional :param vmin: Minimal value", "ticks to lower direction, defaults to \"up\". :type loc_coordinates: str,", "arrow head as proportion of the arrow, defaults to 0.2", "color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot thresholds color_threshold=(189./255., 189./255.,", ":type chrom_r: str :param start_r: Start position of the region", "pybedtools.BedTool object :param input_filename: Full path to ACESeq \"most_important\" file", "str, optional :param color_minus: Color of plus stranded TF regions,", "legend, False otherwise, defaults to False. :type plot_legend: bool, optional", "ax is not None else plt.gca() # Get gene names", "start = int(signal[1]) end = int(signal[2]) value = float(signal[3]) if(value", "def plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax = None):", "genes_sorted_bed = [genes_bed[i] for i in sort_indices] y_pos_dict = {}", "ax if ax is not None else plt.gca() contact_map_index1 =", "plotting via function plotGenes. 
:param genes_bed: :class:`pybedtools.BedTool` object containing genes", "else: g_id = \"group \"+str(g) if(not g_id in patch_description_list): patch_list", "positions of genes. :type gene_mid_points: list :param region: List containing", ":param plot_gene_ids: If True, all gene ids will be included", "plot contact map, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "entries: 1. Chromsome 2. Start position 3. end position 4.", "the patches \\ drawn on the ax. :rtype: list \"\"\"", "height_merged = [] if(not merge is None): heights = []", "pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start,", "plotted. :type end_r: int :param TX_pos: Position of the translocation.", "returned. :rtype: None ''' ax = ax if ax is", "dx, 0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start,", "= list(exp_values) else: exp_values = list(exp_values.iloc[0, :]) expression_values = exp_values", "1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small percentage of", "= gene_regions[counter][0] right_border = region_right_border if(not blacklist is None and", "in genes_bed])] genes_sorted_bed = [genes_bed[i] for i in sort_indices] y_pos_dict", ":return: Nothing to be returned :rtype: None ''' # Use", "defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to", "Iterator, that contains bed-like structured lists with the following elements:", "placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax if ax is", "Start region1 3. End region1 4. Chromosome region2 5. 
Start", "edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], [])", "color = colors[g] else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions", "if (float(i[0])+float(i[1])) > 0 else \"NA\" for i in binned_meth_calls", ":type end: int :param segment_size: Size of the segments for", "on which to plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "be returned :rtype: None ''' # Use given axis for", "not max_y_pos in y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i] = [[gene_start,", "gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values: HUGO", "element in methylation_bed: # Determine bin position = int(element[1]) if(position", "\"linewidth\": .3} patch_list = None patch_description_list = None tick_positions =", "split_line = line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id]", "region scale equalizing the position of genes. :param genes_bed: :class:`pybedtools.BedTool`", "start + offset left += [start] height += [value] left_merged", "the boxplots boxes, defaults to 0.5. :type alpha: float, optional", "False otherwise. Default is False. :type plot_legend: bool, optional :param", ":param contact_map: Matrix that contains the intensity values of HiC", "not be shown on the plot, default is None :type", "\"down\", plot ticks to lower direction, defaults to \"up\". 
:type", "m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries ==", "direction: Direction of distance equalization (top_down | bottom_up), defaults to", "right_border-left_border if(current_extension == 0.): continue if(extension is None): extension =", "ax: Axis to be used for plotting, defaults to None.", "elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] = i", "= {} for line in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id", "False, no edge is plotted, defaults to False. :type edge_color:", "\"-\"): color = color_reverse border_distance_down = region_border_down-start if(start < region_border_up):", "object containing the region to be plotted :type region_bed: :class:`pybedtools.BedTool`", "the max number of stacked genes, default is None. :type", "bool, optional :param position_gene_names: Either of \"top\", or \"bottom\", defaults", ":param rotation: Rotational angle of coordinate strings, defaults to 0.", "in range(int(((end-start)/bin_size)+1)) ] counter = 0 for element in methylation_bed:", "color=\"k\", ax = None): '''Function that plots links between genomic", "tumor and normal) on a gene region scale equalizing the", "the pyramid points downwards, defaults to top, :type location: str,", "plotting HiC intensities, defaults to \"Greys\". :type cmap: str, optional", "blacklist): counter += 1 continue n_groups = len(groups) for g", "max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that determines", ":class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing the expression values of g1", "2, 4, 6], [\"0\", \"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45) def", "ticker.FixedFormatter(([ \" \" for i in gene_names_clean]))) for tick in", "the max_y_position + 1.5. 
max_y_pos defines the \\ number of", ":type gene_names_map: dict. :param expression_df: class:`pandas.DataFrame` object containing the expression", "0 left = [] height = [] for signal in", "midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes =", "Axis where the plot is drawn, defaults to None. :type", "plotting, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend:", ":param offset: Length of intervals, defaults to None. :type offset:", "ploidy_dev = float(interval[3]) tcn = float(interval[4]) if(tcn < -1.*max_dev): tcn", "extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small percentage of region", "heights = [] lefts = [] for i in range(len(left)):", "None else plt.gca() for motif in motifs_bed: motif_start = int(motif[1])", "gene_name_mapping_file: str :return: Dictionary containing the gene id mapping. :rtype:", "CNV segments as heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs", "for i in introns_in_region: start = int(i[1]) end = int(i[2])", "TX_start = start_r TX_end = TX_pos rect = Rectangle((TX_start, .4),", "for legend plotting, defaults to \"tumor\". :type g1_id: str, optional", ":type genes_bed: :class:`pybedtools.BedTool` :param region_size: Size of region to be", "= motif_start color = color_minus dx = -1.*head_length plt.arrow(arrow_start, .5,", "| \"down\". If \"up\", plot ticks to upper direction, else", "otherwise. :type log_transformed: bool, optional :param plot_points: If True, a", ":param expression_df_g2: :class:`pandas.Dataframe` containing the expression values of g2 samples", "Methylated Cs, Unmethylated Cs. 
:type methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome", "left = [] height = [] for signal in chip_signals:", "End region2 :type links_bed: iterator :param chrom_r: Chromosome of the", "patch_list, patch_description_list, where 1. max_y_pos+1.5 is the max_y_position + 1.5.", "= colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect = Rectangle((current_start, .5),", "if(i == 0 and not max_y_pos in y_level_dict): y_pos_dict[gene_name] =", ".2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path = Path(vertices, codes) path_patch", "midpoints of original and distance equalized segments n_segments = len(genomic_segments)", "for i in cnvs_bed]) for interval in cnvs_bed: current_start =", "plt.xticks([], []) plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\",", "\"#80b1d3\". :type color_g2: str, optional :param g1_id: ID of g1", "gene. :rtype: tuple ''' sort_indices = [int(idx) for idx in", "chromosome, start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function", "= int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4]) # Smooth", "a small percentage of region size from extension extension=extension-(region[2]-region[1])*.01 boxprops", "= gene_regions[i][0] right_border = None if(i < len(gene_names)-1): right_border =", "\"1\", \"2\", \"3\", \"4\"], size=6) elif(ploidy == 4): plt.ylim([0, 6.5])", "genes_bed exons_in_region = exons_bed introns_in_region = introns_bed region_border_up = int(region_bed[0][1])", ":type overhang: float, optional :param color_plus: Color of plus stranded", "regions as simple rectangles. :param regions: Iterator containig list-like elements", "in binned_meth_calls ] binned_average_meth_no_missing = [] n = len(binned_average_meth) for", "boxes, defaults to 0.5. 
:type alpha: float, optional :return: Plots", "the colormap to be used for plotting HiC intensities, defaults", "for plotting CNV segments as heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object", "used for plotting samples expression. The number of colors must", "= line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] =", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of equalized region", "tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name] exp_values_g1 =", ":param end: End position of region to be plotted. :type", "genes_bed: :class:`pybedtools.BedTool` :param region_size: Size of region to be plotted", ":param genomic_segments: List of segments for which distances shall be", "(equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else: codes = [Path.MOVETO, Path.LINETO, Path.LINETO,", "if ax is not None else plt.gca() max_dist = 0", "optional :return: Plots axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors = [\"#66c2a5\",", "patch_description_list, where 1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos", "[0, 0], linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start, end], [0.3, 0.3],", "of ax width, such that two genes are plotted side", "189./255., 189./255., 0.5) if(ploidy == 2): plt.plot([int(start), int(end)], [1, 1],", ".4), TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3,", "patch_description_list += [\"reverse strand\"] met_reverse = True # Plot Gene", "region_bed: :class:`pybedtools.BedTool` object containing the region to be plotted :type", "plus stranded genes, default is \"#80b1d3\". 
:type color_plus: str, optional.", "elif(ploidy == 4): plt.ylim([0, 6.5]) plt.yticks([0, 2, 4, 6], [\"0\",", "blacklist): continue left_border = gene_regions[i][0] right_border = None if(i <", "int :param color: Color of the rectangles representing the regions", "value is not equal to 0, than merge elements will", "this ratio is underwent, the genes will be stacked. :type", "current_bin = int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1]", "Name of the colormap to be used for plotting HiC", "facecolor = colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start,", "np.log2([i if i >= 1. else 1. for i in", "the part of the genome that is translocated. :param chrom_r:", "None tick_positions = [] gene_names_clean = [] counter=0 patch_saved =", "head_width: float, optional :param head_length: Length of the arrow in", "between two genes, as ratio of ax width, such that", "if(plot_gene_ids): for i in genes_in_region: start = int(i[1]) gene_name =", "extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops", "whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if i >= 1.", "= int(region_bed[0][1]) # Determine minimal extension of barplot extension=None for", "gene_names_map: dict. 
:param blacklist: Set containing gene ids not to", "= color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color", "in range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes =", "range(1, n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points = []", "gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not blacklist is None and gene_name", "plotted. :type end: int :param color: Color of the rectangles", "else 0. for m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1,", "groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True,", "g_id = None if(not ids is None): g_id = ids[g]", "if(not i % merge == 0): left_merged += [lefts[0]] lefts", "alpha: float, optional :return: Plots axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors", "index: gene ids) :type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary with", "i in str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3]) if(segment_type == \"R\"):", "if not i == len(binned_average_meth)-1 else \"NA\") average_list = [", "= {\"color\": \"k\", \"linewidth\": .3} whiskerprops = {\"color\": \"k\", \"linewidth\":", "int :param distance_ratio: Minimal distance between two genes, as ratio", "= names of segments, and values patch :rtype: dict '''", "\"None\", edgecolor = color, lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False)", "coordinates are reverted to decreasing order. Else, coordinates stay in", "y_level_dict = {} max_y_pos = 0 for interval in genes_sorted_bed:", "(R,G,B). 
:type segments_Tabix_filename: str :param chrom: Chromosome of the region", "Dictionary with keys = gene ids and values = y", "such that two genes are plotted side by side. If", "[Path.MOVETO, Path.CURVE3, Path.CURVE3] path = Path(vertices, codes) patch = PathPatch(path,", "legend_loc: Location of the legend. Either of \"lower left\", \"lower", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color of bars, defaults", ":param ax: Axis on which to plot contact map, defaults", "else \"NA\") average_list = [ j for j in [meth_before,", "(binned_average_meth[i+1] if not i == len(binned_average_meth)-1 else \"NA\") average_list =", "pybedtools.BedTool(\"\\t\".join([str(i) for i in region]), from_string=True) # Get gene names", "object containing CNVs with following entries: 1. Chromosome, 2. Start", "start_r TX_end = TX_pos rect = Rectangle((TX_start, .4), TX_end-TX_start, .2,", ".8), (equalized_region_mid_point, 1)] path = Path(vertices, codes) path_patch = PathPatch(path,", "in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol =", "i in range(n): if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]]", "Either of \"top\" | \"bottom\". If location == \"top\", the", "distance_ratio: float :return: Tuple of 1. max_y_pos: Defines the number", "default is 0.1. :type distance_ratio: float, optional :param ax: Axes", "in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return", "[bplot[\"boxes\"][0]] patch_description_list += [g_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names", "int :param end_r: End position of the region to be", "ax.boxplot([np.log2([i if i >= 1. else 1. for i in", "region to be plotted. 
:type start_r: int :param end_r: End", "end_r if(direction == \"left\"): TX_start = start_r TX_end = TX_pos", "if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 = list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0,", "stranded TF regions, defaults to \"#fb8072\". :type color_minus: str, optional", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", "= None): '''Function that plots arcs from unequal distances of", "ploidy = None for line in input_file: if(line[:7] == \"#ploidy\"):", "horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates", "be plotted. :type end: int :param color: Color of the", "of tumor, defaults to 2. :type ploidy: int, optional :param", "= {\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops = {\"color\": \"k\"}", "float, optional. 
:param ax: Axis of plot, defaults to None.", "= split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed,", "i in range(int(((end-start)/bin_size)+1)) ] counter = 0 for element in", "color, lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([],", "not i == 0 else \"NA\") meth_after = (binned_average_meth[i+1] if", "rectangle, representing the region to be plotted, defaults to 1.", "if(strand == \"-\"): color = color_reverse border_distance_down = region_border_down-start if(start", "linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "[int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)): region_mid_point", "to None. :type vmax: float, optional :param location: Either of", "if(not upper): tick_size = 10**int((np.log10((end-start)/10))) # Determine first tick position", "else: exp_values = list(exp_values.iloc[0, :]) expression_values = exp_values if(log_transformed): expression_values", "chrom: str :param start: Start position of the region to", "cm import matplotlib import tabix import math def plotGenes(genes_bed, exons_bed,", "which each element is a list-ike object containing: 1. Chromosome", "extension = float(current_extension) elif(current_extension < extension): extension = float(current_extension) boxprops", "wa=True, u=True).sort() gene_names = [] gene_regions = [] for e", "number gains, defaults to \"g\". :type color_gain: str, optional :param", "list-like elements with the following entries: 1. Chromsome 2. Start", "\"top_down\". :type direction: str, optional. 
:param ax: Axis on which", "range(len(gene_regions)): if(not blacklist is None and gene_names[i] in blacklist): continue", "with keys = gene ids and values = y position", "end-start, .4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list = [] patch_description_list", "binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) >", "(default: None) Axis used for plotting, defaults to None. :type", "plotting, defaults to None. Number of ids must be the", "Value :type meth_calles: iterator :param chrom: Chromosome of region to", "values of g2 samples (columns: sample ids; index: gene ids)", ">= 1. else 1. for i in exp_values]) bplot =", ":param introns_bed: :class:`pybedtools.BedTool` object containing introns :type introns_bed: :class:`pybedtools.BedTool` :param", "of genes. :type exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object containing", "is a ensemble gene id, and the second column is", "[lefts[0]] lefts = [] height_merged += [np.mean(heights)] heights = []", "y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i] +=", "value for the background color of the boxplots boxes, defaults", "[] height_merged += [np.mean(heights)] heights = [] offset = merge*offset", "m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1])", "import Path from matplotlib.patches import PathPatch import matplotlib.cm as cm", "ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([], []) def", "region1 3. End region1 4. Chromosome region2 5. Start region2", "as bar :type chip_signals: iterator :param r_chrom: Chromosome of region", "gene region scale equalizing the position of genes. :param genes_bed:", "the number of groups. :type ids: list, optional. 
:param plot_gene_names:", "i in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for", "the one region, for which the gene plot is created.", "None): extension = float(current_extension) elif(current_extension < extension): extension = float(current_extension)", "[ [chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"] ] input_file.close() return", "defaults to \"tumor\". :type g1_id: str, optional :param g2_id: ID", "lefts = [] for i in range(len(left)): if(i % merge", "max_y_pos = y_max # Plot Exons for i in exons_in_region:", "print(tick_labels) for i in range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]],", "same as the number of groups, defaults to None. :type", "used for plotting, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "be equalized (each segment is of the form [<chrom>, <start>,", "else: rect = Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt',", "exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 = list(exp_values_g2)", "used for legend plotting, defaults to \"tumor\". :type g1_id: str,", "plt.yticks([], []) return patches_dict def plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2,", "if i >= 1. else 1. for i in exp_values_g1])],", "for gene plotting strand = str(i[5]) color = color_forward if(strand", "that should not be shown on the plot, default is", "split_line[2], str(ploidy_dev), split_line[5], \"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e", "object containing the one region, for which the gene plot", "g2_id: ID of g2 used for legend plotting, defaults to", "be plotted. 
:type start: int :param end: End position of", "== \"top_down\"): codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices =", ":param r_chrom: Chromosome of region to be plotted. :type r_chrom:", "# Get gene names and regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True,", "\"upper left\", \"upper right\", default is \"lower right\". :type legend_loc:", "in bp (depends on the region that is plotted), defaults", "max_dev: Maximal deviation from ploidy to plot, defaults to None.", "to be used for plotting HiC intensities, defaults to \"Greys\".", "tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick in ax.get_yticklabels(): ytick.set_size(5)", "regions, defaults to \"k\". :type color_neutral: str, optional :param ax:", "for legend plotting, defaults to None. Number of ids must", "are plotted otherwise, defaults to False. :type plot_points: bool, optional", ":param ax: Axis used for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "equalized_region_mid_point = equalized_region_mid_points[i] codes = [] vertices = [] if(direction", "instance on which the genes are plotted, default is None.", "height += [value] left_merged = [] height_merged = [] if(not", "the pyramid points upwards, else if location == \"bottom\" the", "None if(scale == \"Mb\"): digits_to_round = int(6-np.log10(tick_size)) divisor = 1000000", "plotHiCContactMap(contact_map, start, end, segment_size, cmap=\"Greys\", vmin=None, vmax=None, location=\"top\", ax=None): '''Function", "alpha=.5) elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls", "tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name] exp_values =", ":type color_g1: str, optional :param color_g2: Color used for plotting", "plotted, defaults to 1. :type alpha: float, optional. :param ax:", "bin to average methylation values, defaults to 1000. 
:type bin_size:", "< cnv_threshold): color=colors(.5) rect = Rectangle((current_start, .5), current_end-current_start, 1, color=color,", "of gene names, for genes that should not be shown", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", ":type end: int :param color: Color of lines equalizing distances,", "] binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list))) if len(average_list) > 0", "= {\"color\": \"k\", \"linewidth\": .3} flierprops = {\"color\": \"k\"} medianprops", "of distance equalization (top_down | bottom_up), defaults to \"top_down\". :type", "genes_bed: :class:`pybedtools.BedTool` object containing gene regions. :type genes_bed: :class:`pybedtools.BedTool` :param", "for i in range(max_y_pos+1): if(i == 0 and not max_y_pos", "End position :type regions: iterator :param start: Start position of", "4. Chromosome region2 5. Start region2 6. End region2 :type", "be plotted, False otherwise, defaults to True. :type plot_gene_names: bool,", "meth_after] if not j == \"NA\" ] binned_average_meth_no_missing += [", "bool. :return: Axis on which plot was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot`", "= Path(vertices, codes) patch = PathPatch(path, facecolor = \"None\", edgecolor", "in range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\",", ":param region: List containing the region to be plotted ([<chrom>,", "[6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy == 2):", "0. :type overhang: float, optional :param color_plus: Color of plus", "int(element[4]) current_bin = int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0] += n_meth", "showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if i >= 1. 
else 1.", "1]) plt.xticks([], []) plt.xlim([start, end]) def plotTX(chrom_r, start_r, end_r, TX_pos,", "i >= 1. else 1. for i in exp_values_g1])], positions=[bplot_g1_pos],", "'''Function that plots HiC contact maps as pyramid plots :param", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy", "equalized_region_mid_points def plotCoordinates(chrom, start, end, color=\"k\", ax = None, upper=True,", "for interval in cnvs_bed: current_start = int(interval[1]) current_end = int(interval[2])", "break elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] =", "expression_df: class:`pandas.DataFrame` :param blacklist: Set containing gene ids not to", "def plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None, color=\"b\", offset=None, merge=None): '''Function", "the ax. 3. patch_description_list is the list of descriptions for", "= right_border-left_border if(current_extension == 0.): continue if(extension is None): extension", "defaults to \"#fb8072\". :type color_g1: str, optional :param color_g2: Color", ":type legend_loc: str, optional :param color_plus: Color code for plus", "be plotted. :type r_end: int :param ax: Axis of plot", "gene ids). :type expression_df: class:`pandas.DataFrame` :param blacklist: Set containing gene", "ax. 
:rtype: list \"\"\" ax = ax if ax is", "for plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not", "for g in range(n_groups): bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions", "bool, optional :param distance_ratio: Minimal distance between two genes, as", "g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for plotting paired gene expression (e.g.", "names and regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names =", "== \"Series\"): exp_values = list(exp_values) else: exp_values = list(exp_values.iloc[0, :])", "Max y value in the gene plot. If not set,", "barplot extension=None if(len(gene_mid_points) <= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract", "will be stacked. :type distance_ratio: float :return: Tuple of 1.", "default is True :type plot_gene_ids: bool, optional :param y_max: Max", "is plotted, defaults to False. :type edge_color: str, optional :param", "color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy == 4): plt.plot([int(start), int(end)], [1, 1],", "list \"\"\" ax = ax if ax is not None", "back (0 overhang means triangular shape). Can be negative or", "pandas as pnd import numpy as np import tabix import", "of groups, defaults to None. :type colors: str, optional :param", "which to plot CNVs. :type chromosome: str :param start: Start", "== 2): plt.ylim([0, 4.5]) plt.yticks([0, 1, 2, 3, 4], [\"0\",", "else: continue return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that creates", "+= [left_border + extension/2.] gene_names_clean += [gene_name] exp_values_g1 = expression_df_g1.loc[gene_name,", "exons_bed, introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False,", "otherwise, defaults to False. 
:type plot_points: bool, optional :param alpha:", "on chromosome. :type end: int :param ploidy: Assumed ploidy of", "== \"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start,", "= ax if ax is not None else plt.gca() TX_start", "the region that is plotted), defaults to 1000. :type head_length:", "alpha: Alpha value of the rectangle, representing the region to", "\"up\"): plt.plot([start, end], [0, 0], linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start,", "plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List", "def plotCoordinates(chrom, start, end, color=\"k\", ax = None, upper=True, loc_coordinates=\"up\",", "shape). Can be negative or greater than one. Defaults to", "| bottom_up), defaults to \"top_down\". :type direction: str, optional. :param", "gene region scale retaining the position of genes. :param genes_bed:", "in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance =", "current_start = int(interval[1]) current_end = int(interval[2]) ploidy_dev = float(interval[3]) tcn", "end-start, .06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand == \"+\" and", "border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name = str(i[3]) gene_name_label", "colors: List of colors used for plotting samples expression. The", "{\"color\": \"k\"} medianprops = {\"color\": \"k\", \"linewidth\": .3} whiskerprops =", "from ploidy, 5. True Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param", ":type location: str, optional :param ax: Axis on which to", "[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 1), (region_mid_point, .8),", "CNVs with following entries: 1. Chromosome, 2. 
Start Position, 3.", "offset: int, optional :param merge: Number of elements to be", "y value in the gene plot. If not set, then", "score, strand, start, end, color). The color field is used", "= equalized_region_mid_points[i] codes = [] vertices = [] if(direction ==", "= int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] +=", "start: int :param end: Chromosomal end position of region to", "Color of the rectangles representing the regions to be plotted,", "str, optional :param g2_id: ID of g2 used for legend", ":type direction: str, optional :param color: Color of the bar", "\"#cbebc4\". :type color: str, optional :param edge_color: Color of region", "and values = y position \\ of gene. :rtype: tuple", "of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color of", "\"NA\" ] binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list))) if len(average_list) >", "= [] heights += [height[i]] lefts += [left[i]] if(not i", "None else plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map", "bar :type chip_signals: iterator :param r_chrom: Chromosome of region to", "else 1. for i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops,", "if(counter < len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos = left_border +", "if(revert_coordinates): ticks = [ start + end-i for i in", "ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([], []) plt.xlim([start_r, end_r]) plt.ylim([0,", "methylation values as heatmap :param methylation_bed: Methylation calls. 
Following fields", "the background color of the boxplots boxes, defaults to 0.5.", "int :param end: End position of region to be plotted.", "= [] for signal in chip_signals: start = int(signal[1]) end", "] path = Path(vertices, codes) intensity_value = contact_map.iloc[i, j] intensity_value", "if(abs(ploidy_dev) > cnv_threshold): rect = Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color,", "Minimal value of intensity range to be plotted, defaults to", "a tab separated file, for which the first column is", "expression, defaults to \"#fb8072\". :type color_g1: str, optional :param color_g2:", "plotted :type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing the expression", "if(abs(ploidy_dev) < cnv_threshold): tcn = ploidy color = color_neutral if(ploidy_dev", "If True, a point per expression value is plotted in", "n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0 else", "optional. :param ax: Axis of plot, defaults to None. :type", "used for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to", "r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000, ax = None):", "= None): '''Function for plotting genomix segments in different colors", "chromosome: str :param start: Start position on chromosome. 
:type start:", ":rtype: dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map = {}", "\"alpha\":alpha} flierprops = {\"color\": \"k\"} medianprops = {\"color\": \"k\", \"linewidth\":", "blacklist): continue # Define color for gene plotting strand =", "< len(gene_names)-1): right_border = gene_regions[i+1][0] else: right_border = region_right_border current_extension", "for motif in motifs_bed: motif_start = int(motif[1]) motif_end = int(motif[2])", "chrom, start, end, color=\"k\", ax=None): '''Function that plots methylation values", "start_r: Chromosomal start position of the region to be plotted.", "value of intensity range to be plotted, defaults to None", "None else plt.gca() # Calculate midpoints of original and distance", "True, all gene ids will be included in the plot,", "createGeneNameMap(gene_name_mapping_filename): '''Function that creates a mapping between gene ids :param", "(float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[4]) for m", "of lines equalizing distances, defaults to \"k\". :type color: str,", "gene ids not to be plotted, default to None. :type", ":param ploidy: Assumed ploidy of tumor, defaults to 2. :type", "str :param start_r: Start position of the region to be", "[ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0 else \"NA\" for i", "for plotting samples expression. The number of colors must be", "capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = [] patch_description_list = []", "left') return ax def plotGenomicSegments(segments_list, chrom, start, end, ax =", "the TF sited to be plotted. :type motifs_bed: :class:`pybedtools.BedTool` :param", "chrom_r: str :param start_r: Start position of the region to", "int :param segment_size: Size of the segments for which contacts", "<= end): ticks += [current_tick] current_tick = current_tick + tick_size", "None. :type max_dev: float, optional :param ax: Axis used for", "points are plotted otherwise, defaults to False. 
:type plot_points: bool,", "1]) def plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\",", "\"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\" or line[:5]", "chrom_r: str :param start_r: Chromosomal start position of the region", ":param methylation_bed: Methylation calls. Following fields must be included: Chrom,", "equalized_region_size = (end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = []", "genes. :type genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the", "colors: str, optional :param ids: IDs used for legend plotting,", "for element in methylation_bed: # Determine bin position = int(element[1])", "plot_points=False, alpha=.5): '''Function for plotting grouped gene expression (e.g. tumor", "\"r\") cnv_bed_list = [] ploidy = None for line in", "(float(sum(average_list))/ float(len(average_list))) if len(average_list) > 0 else 0. ] binned_average_meth", "Plot Gene Names if(plot_gene_ids): for i in genes_in_region: start =", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned", "= ax.boxplot([np.log2([i if i >= 1. else 1. for i", "id mapping. :rtype: dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map", "loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that plots genomic coordinates in a", "and normal) on a gene region scale retaining the position", "of the genome that is translocated. :param chrom_r: Chromosome of", "plots. 
:param meth_calls: Iterator containing list-like elements with the following", "exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values = list(exp_values)", "for i in ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels) for i", "patches_dict[segment_type] = rect plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([], []) return", "defaults to \"k\". :type color_neutral: str, optional :param ax: Axis", "= float(interval[3]) tcn = float(interval[4]) if(tcn < -1.*max_dev): tcn =", "tick.set_rotation(45) tick.set_size(5) for ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list,", "plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for plotting paired", "while(current_tick <= end): ticks += [current_tick] current_tick = current_tick +", "return equalized_region_mid_points def plotCoordinates(chrom, start, end, color=\"k\", ax = None,", "int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance > max_dist): max_dist = distance", ":type end: str :param ax: Axis used for plotting, defaults", "plotted. :type end: str :param ax: Axis used for plotting,", "hidden. :type plot_gene_names: bool. :return: Axis on which plot was", "Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome for which", "region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name = str(i[3])", "and TXend of genes. 
:type genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool`", "to a tab separated file, for which the first column", "int :param end: End position of the region to be", "PathPatch(path, facecolor = \"None\", edgecolor = color, lw = lw)", "= {} max_y_pos = 0 for interval in genes_sorted_bed: gene_name", "list-ike object containing: 1. Chromosome 2. Start postion 3. End", "distance_ratio: Minimal distance between two genes, as ratio of ax", "name, score, strand, start, end, color). The color field is", "of the rectangles representing the regions to be plotted, defaults", ":type plot_gene_names: bool, optional :param position_gene_names: Either of \"top\", or", "split_line = line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom = split_line[0] if(chrom", "tcn = float(interval[4]) if(tcn < -1.*max_dev): tcn = -1.*max_dev elif(tcn", "color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [ (bplot_pos+ (i-.5)*", "plot_gene_names: True if gene names shall be plotted, False otherwise,", "start, end, color). The color field is used to determine", "head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that plots TF motifs", "and gene_name in blacklist): counter += 1 continue n_groups =", "float(interval[4]) # Smooth tcn, if ploidy_dev is smaller than cnv_threshold", "Iterator containig list-like elements with the following entries: 1. Chromosome", "'''Function that plots a translocation event as a bar, showing", "TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7])", "to be plotted, defaults to None, :type blacklist: set, optional", "gene ids :param gene_name_mapping_file: Path to a tab separated file,", "of plot, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param", "n_segments = len(genomic_segments) equalized_region_size = (end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments", "int, optional :param overhang: Fraction that the arrow is swept", "on which the genes are plotted, default is None. :type", "values of g1 samples (columns: sample ids; index: gene ids)", "y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that creates a mapping between gene", "counter += 1 continue n_groups = len(groups) for g in", "HiC contact maps as pyramid plots :param contact_map: Matrix that", "gene_regions[i+1][0] else: right_border = region_right_border current_extension = right_border-left_border if(current_extension ==", "in chip_signals: start = int(signal[1]) end = int(signal[2]) value =", "met_reverse = False # Plot Introns for i in introns_in_region:", "CNVs, defaults to \"bwr\". :type cmap: str, optional :param max_dev:", "names shall be plotted, False otherwise, defaults to True. :type", "right\". :type legend_loc: str, optional :param color_plus: Color code for", "for gene_name in gene_names: left_border = gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2", ":param revert_coordinates: If True, coordinates are reverted to decreasing order.", "gene_names[i] in blacklist): continue left_border = gene_regions[i][0] right_border = None", "positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2", "optional :param color_plus: Color of plus stranded TF regions, defaults", "edge_color: Color of region edge. If False, no edge is", "vmax=1.) m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap) for", "int(element[3]) n_unmeth = int(element[4]) current_bin = int((position-start)/bin_size) counter += 1", "plotted side by side. 
If this ratio is underwent, the", "in genes_in_region: start = int(i[1]) gene_name = str(i[3]) if(not blacklist", "y_max # Plot Exons for i in exons_in_region: start =", ":param log_transformed: If True use log transformed values for plotting,", "scale=\"Kb\" digits_to_round = None divisor = None if(scale == \"Mb\"):", "motif_end arrow_end = motif_start color = color_minus dx = -1.*head_length", "plot, defaults to None. :type max_dev: float, optional :param ax:", ":type chromosome: str :param start: Start position on chromosome. :type", "def plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\", ax=None): '''Function that plots", "genes are plotted side by side. If this ratio is", "or minus stranded genes is plotted, False otherwise. Default is", "[binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1] if not i == 0", "start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function for plotting", "plotGenomicSegments(segments_list, chrom, start, end, ax = None): '''Function for plotting", "= str(segment[3]) if(segment_type == \"R\"): color = (1,1,1,1) rect =", "positions of genes for plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down-", ":return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where 1. max_y_pos+1.5 is", "HUGO GENE SYMBOLs will be shown, else the GENE SYMBOLs", "(midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes = [Path.MOVETO,", "and values: HUGO GENE SYMBOLs. :type gene_names_map: dict. 
:param blacklist:", "expression value is plotted in addition to the boxplot, no", "blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for", "if(not gene_names_map is None): gene_names += [gene_names_map[gene_name_ens]] else: gene_names +=", "u=True).sort() gene_names = [] gene_regions = [] for e in", "1. for i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist = True,", "TX_end = TX_pos rect = Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color,", ":type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points: list of integer values containing", "merge == 0): left_merged += [lefts[0]] lefts = [] height_merged", "location: Either of \"top\" | \"bottom\". If location == \"top\",", "if(extension is None): extension = float(current_extension) elif(current_extension < extension): extension", "list :param groups: List of lists containing the IDs of", "IDs of the different groups. 
:type groups: list :param gene_names_map:", "if ax is not None else plt.gca() # Get gene", "Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(vertices, codes) intensity_value =", "in cnvs_bed: current_start = int(interval[1]) current_end = int(interval[2]) ploidy_dev =", "else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size = 10**int((np.log10((end-start)/10)))", "end-i for i in ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels) for", "+= [gene_name] exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values", "= True # Plot Gene Names if(plot_gene_ids): for i in", "+= [height[i]] lefts += [left[i]] if(not i % merge ==", "segments :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following entries:", "left_merged = [] height_merged = [] if(not merge is None):", "TX_pos: int :param direction: Direction of the genomic part that", "= cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm =", "paired gene expression (e.g. tumor and normal) on a gene", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True legend", "containing introns :type introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing", "object containing TXstart, and TXend of genes. :type genes_bed: :class:`pybedtools.BedTool`", "\"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start,", "optional :param head_length: Length of the arrow in bp (depends", "extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small percentage of region size from", "a point per expression value is plotted in addition to", "cmap: str, optional :param vmin: Minimal value of intensity range", "the region to be plotted. 
:type end: str :param ax:", "PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1])", "int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2, 2],", "heights = [] heights += [height[i]] lefts += [left[i]] if(not", "if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list def", "linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks = [ start + end-i", "colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect = Rectangle((current_start, .5), current_end-current_start,", "chip_signals: Iterator for which each element is a list-ike object", "containing the one region, for which the gene plot is", "numpy as np import tabix import matplotlib.ticker as ticker from", "not None else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i in", "else: plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def", "color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03),", "positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color", "= [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values))) ]", "in region]), from_string=True) # Get gene names and regions genes_in_region_bed", "Start position of region to be plotted. :type start: int", "one region, for which the gene plot is created. 
:type", "Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point, .2),", "region_border_up): start = region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)):", "gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick in", "str, optional :param log_transformed: If True use log transformed values", "linewidth=0) ax.add_patch(rect) else: rect = Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color,", "None if(not colors is None): color = colors[g] else: color", "is not None else plt.gca() TX_start = TX_pos TX_end =", "color for gene plotting strand = str(i[5]) color = color_forward", "optional :param edge_color: Color of region edge. If False, no", "codes) path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start,", "is None): extension = float(current_extension) elif(current_extension < extension): extension =", "end, name, score, strand, start, end, color). The color field", "int, optional :param ax: Axis to be used for plotting,", "plotted, defaults to None, :type blacklist: set, optional :param ax:", "event as a bar, showing the part of the genome", "determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not y_max is None): max_y_pos =", "plt.gca() max_signal = 0 left = [] height = []", "the HUGO GENE SYMBOLs will be shown, else the GENE", "regions, defaults to \"#fb8072\". :type color_minus: str, optional :param ax:", "\"normal\". 
:type g2_id: str, optional :param plot_gene_names: If True, the", "in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return", "revert_coordinates=False, rotation=0): '''Function that plots genomic coordinates in a linea", "legend. Either of \"lower left\", \"lower right\", \"upper left\", \"upper", "color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting gene structures, i.e. introns exons", "List of segments for which distances shall be equalized (each", "0)): left_merged += [lefts[0]] lefts = [] height_merged += [np.mean(heights)]", "< start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points +=", "n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points = [] for", "of genes. :type genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing", "defaults to False. :type plot_legend: bool, optional :param colors: List", "ax = ax if ax is not None else plt.gca()", "optional :param legend_loc: Location of the legend. 
Either of \"lower", "int :param ploidy: Assumed ploidy of tumor, defaults to 2.", "if ploidy_dev is smaller than cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn", "first_tick while(current_tick <= end): ticks += [current_tick] current_tick = current_tick", "= first_tick while(current_tick <= end): ticks += [current_tick] current_tick =", "color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2.", "= Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c +=", ":param color_g2: Color used for plotting g2 samples expression, defaults", "for e in genomic_segments: if(int(e[1]) < start): region_mid_points += [start+(int(e[2])-start)/2]", "if ax is not None else plt.gca() colors = plt.cm.get_cmap(cmap)", "if(not blacklist is None and gene_name in blacklist): counter +=", "plot CNVs. :type chromosome: str :param start: Start position on", "\"g\". :type color_gain: str, optional :param color_loss: Plot color of", "optional :param overhang: Fraction that the arrow is swept back", "plot_legend: If True, a legend describing plus or minus stranded", "neutral regions, defaults to \"k\". :type color_neutral: str, optional :param", "from_string=True) # Get gene names and regions genes_in_region_bed = genes_bed.intersect(region_bed,", "defaults to \"b\". :type color: str, optional :param offset: Length", "to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If True,", "expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 = list(exp_values_g2) else: exp_values_g2", "plotted. :type end_r: int :param color: Color of the arc,", "bed file containing (chrom, start, end, name, score, strand, start,", "= [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 0), (region_mid_point,", "be returned. 
:rtype: None ''' # Use given axis for", "tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale for i in ticks ]", "extension of barplot extension=None for i in range(len(gene_regions)): if(not blacklist", ":type color_minus: str, optional. :return: Tuple of max_y_pos+1.5, patch_list, patch_description_list,", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True legend is plotted, False", "None else plt.gca() c = 0 for region in regions:", "\"\"\" ax = ax if ax is not None else", "patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end) if(location ==", "be used for plotting, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "is None. :type y_max: bool, optional :param distance_ratio: Minimal distance", "\"bottom\". If location == \"top\", the pyramid points upwards, else", "and the second column is the HUGO gene name :type", "end: int :param segment_size: Size of the segments for which", "color_forward if(strand == \"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5", "max_y_pos): max_y_pos += 1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] = [[gene_start,", "ax.set_xlim(start, end) if(location == \"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0)", "codes = [] vertices = [] if(direction == \"top_down\"): codes", "color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation)", "CNV, defaults to 0.7. 
:type cnv_threshold: float, optional :param cmap:", "1, color=color) ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start), int(end)) plt.ylim(0, 1)", ":type cnv_threshold: float, optional :param cmap: Colormap used for plotting", "color=color) ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([],", "patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id] counter", "height_merged += [np.mean(heights)] heights = [] offset = merge*offset left", "[] for e in genomic_segments: if(int(e[1]) < start): region_mid_points +=", "end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that plots", "ACESeq \"most_important\" file :type input_filename: str :return: :class:`pybedtools.BedTool` object containing", "y_range: # Define midpoint of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.)", "gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False,", "'''Function for plotting genomix segments in different colors :param segments_tabix_filename:", "equalized region midpoints. :rtype: list ''' ax = ax if", "segments as heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with", "equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for i in range(1, n_segments+1): equalized_region_mid_points", "= [[gene_start, gene_end]] break elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) >", "ax if ax is not None else plt.gca() max_signal =", "if ax is not None else plt.gca() max_signal = 0", "color of copy number losses, defaults to \"r\". 
:type color_loss:", "to pybedtools.BedTool object :param input_filename: Full path to ACESeq \"most_important\"", "End position of region to be plotted. :type end: int", "start position of the region to be plotted. :type start_r:", ":] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 = list(exp_values_g2) else: exp_values_g2 =", "= int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) # Determine minimal extension of", "else: meth_before = (binned_average_meth[i-1] if not i == 0 else", "== 4): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [", "start: Chromosomal start position of region to be plotted. :type", "that two genes are plotted side by side. If this", "plotted. :type chrom_r: str :param start_r: Chromosomal start position of", "color_gain: Plot color of copy number gains, defaults to \"g\".", "= abs(link_pos2-link_pos1) if(distance > max_dist): max_dist = distance mid_point =", "boxplots boxes, defaults to 0.5. :type alpha: float, optional :return:", "plt.text(start, y, gene_name_label, size=5, color = color) gene_name = str(i[3])", "= matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end) if(location == \"top\"):", "ax = None): '''Function that plots arcs from unequal distances", "the list of patches drawn on the ax. 3. patch_description_list", "-.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([],", "== \"chrom\"): continue split_line = line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom", "number of stacked genes. 2. y_pos_dict: Dictionary with keys =", "for gene_name in gene_names: left_border = gene_regions[counter][0] right_border = region_right_border", "to False. 
:type revert_coordinates: bool, optional :param rotation: Rotational angle", "tick_size scale = None if(first_tick > 1000000): scale = \"Mb\"", "points representing methylation values, defaults to \"k\". :type color: str,", "> max_dist): max_dist = distance mid_point = link_pos1 + (link_pos2-link_pos1)/2", "on which to plot contact map, defaults to None. :type", "True, coordinates are reverted to decreasing order. Else, coordinates stay", "Path.LINETO, Path.CLOSEPOLY, ] path = Path(vertices, codes) intensity_value = contact_map.iloc[i,", "bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i", "containing (chrom, start, end, name, score, strand, start, end, color).", "= Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r,", ":type genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object containing exons of", "SYMBOLs will be shown, else the GENE SYMBOLs are hidden.", "import pybedtools import pandas as pnd import numpy as np", "[]) return patches_dict def plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7,", "a legend describing plus or minus stranded genes is plotted,", "else: gene_names += [gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]] region_right_border =", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color of bars, defaults to", "== 5): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [", ":param end: End position on chromosome. :type end: int :param", ":type g1_id: str, optional :param g2_id: ID of g2 used", "used for plotting CNVs, defaults to \"bwr\". :type cmap: str,", "right_border = gene_regions[counter+1][0] bplot_g1_pos = left_border + extension/4. 
bplot_g2_pos =", "start, end, color=\"k\", ax = None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0):", "for plus stranded genes, default is \"#80b1d3\". :type color_plus: str,", "max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines the \\", "x_positions = [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values)))", "plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc,", "gene_names_clean = [] counter=0 for gene_name in gene_names: left_border =", "optional :param g2_id: ID of g2 used for legend plotting,", ":param color_neutral: Plot color of copy number neutral regions, defaults", "start = int(i[1]) end = int(i[2]) gene_name = str(i[3]) if(not", "max_y_pos += 1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]]", "0 else \"NA\") meth_after = (binned_average_meth[i+1] if not i ==", "float, optional :param vmax: Maximal value of intensity range to", "direction=\"right\", color=\"k\", ax=None): '''Function that plots a translocation event as", "None): gene_name_label = gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label,", "plot is drawn, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "2. patch_list is the list of patches drawn on the", "color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for plotting paired gene", "list ''' ax = ax if ax is not None", "that plots methylation values as dot plots. :param meth_calls: Iterator", "codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path =", "overhang means triangular shape). Can be negative or greater than", "gene_regions = [] for e in genes_in_region_bed: gene_name_ens = str(e[3])", "to \"g\". 
:type color_gain: str, optional :param color_loss: Plot color", "y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]] break else: continue", "None else plt.gca() max_dist = 0 for e in links_bed:", "ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points,", "ids will be included in the plot, False otherwise, default", "< -1.*max_dev): tcn = -1.*max_dev elif(tcn > max_dev): tcn =", "introns exons of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TX", "None for line in input_file: if(line[:7] == \"#ploidy\"): ploidy =", "region. :type end: int :param color: Color of lines equalizing", "is smaller than cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn = ploidy", "int(element[1]) if(position < start or position > end): continue n_meth", "input_file: if(line[:7] == \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] ==", "of genes. :type gene_mid_points: list :param region: List containing the", "met_forward = True elif(strand == \"-\" and not(met_reverse)): patch_list +=", "Determine y positions of genes for plotting max_y_pos, y_pos_dict =", "for which each element is a list-ike object containing: 1.", "GENE IDs, and values: HUGO GENE SYMBOLs. :type gene_names_map: dict.", "False. :type plot_points: bool, optional :param alpha: Alpha value for", ":param location: Either of \"top\" | \"bottom\". If location ==", "[0., .3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\",", "of colors used for plotting samples expression. 
The number of", "upwards, else if location == \"bottom\" the pyramid points downwards,", "float(signal[3]) if(value > max_signal): max_signal = value if(not offset is", "line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom = split_line[0] if(chrom == \"23\"):", "plot, False otherwise, default is True :type plot_gene_ids: bool, optional", "plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper:", "number of groups, defaults to None. :type colors: str, optional", "the following elements: 1. Chromosome region1 2. Start region1 3.", "[]) def readACESeqAsBed(input_filename): '''Function that reads CNVs from ACESeq (\"*most_important*\")", "ratio is underwent, the genes will be stacked, default is", "[] tick_positions = [] gene_names_clean = [] counter=0 for gene_name", "if ax is not None else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i)", ":param ax: Axis of plot, defaults to None. :type ax:", "in ticks ] if(loc_coordinates == \"up\"): plt.plot([start, end], [0, 0],", ":param g1_id: ID of g1 used for legend plotting, defaults", "= float(split_line[5])-ploidy chrom = split_line[0] if(chrom == \"23\"): chrom=\"X\" elif(chrom", "end = int(i[2]) gene_name = str(i[3]) if(not blacklist is None", "of HiC contacts. :type contact_map: :class:`pandas.DataFrame` :param start: Chromosomal start", "plotCoordinates(chrom, start, end, color=\"k\", ax = None, upper=True, loc_coordinates=\"up\", revert_coordinates=False,", "the expression values of g1 samples (columns: sample ids; index:", "direction: str, optional. :param ax: Axis on which to plot,", "bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([], [])", "vertices = [] if(direction == \"top_down\"): codes = [Path.MOVETO, Path.LINETO,", "# Determine minimal extension of barplot extension=None for i in", "optional. 
:param plot_gene_names: True if gene names shall be plotted,", "% merge == 0 and not (i == 0)): left_merged", "= int(6-np.log10(tick_size)) divisor = 1000000 else: digits_to_round = int(5-np.log10(tick_size)) divisor", "continue if(counter < len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos = left_border", "1000. :type head_length: int, optional :param overhang: Fraction that the", "linewidth=.5) plt.plot([int(start), int(end)], [2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "== \"-\"): arrow_start = motif_end arrow_end = motif_start color =", "genes_in_region = genes_bed exons_in_region = exons_bed introns_in_region = introns_bed region_border_up", "i in range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes", "class:`pandas.DataFrame` object containing the expression values of all samples (columns:", "\"Greys\". :type cmap: str, optional :param vmin: Minimal value of", "log_transformed: bool, optional :param plot_points: If True, a point per", "j == \"NA\" ] binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list))) if", "code for plus stranded genes, default is \"#80b1d3\". :type color_plus:", "not None else plt.gca() genes_in_region = genes_bed exons_in_region = exons_bed", "> distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i] += [[gene_start, gene_end]] break", "if ax is not None else plt.gca() contact_map_index1 = (start)/segment_size", "for plotting ax = ax if ax is not None", "to \"b\". :type color: str, optional :param offset: Length of", "ids not to be plotted, default to None. 
:type blacklist:", "< start or position > end): continue n_meth = int(element[3])", "+= 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))", "Path.LINETO] vertices = [(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point,", "+ (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point = link_pos2 + (link_pos1-link_pos2)/2", "the region to be plotted ([<chrom>, <start>, <end>]). :type region:", "patches \\ drawn on the ax. :rtype: list \"\"\" ax", ">= cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color = color_loss if(abs(ploidy_dev)", "\"-\" and not(met_reverse)): patch_list += [patch] patch_description_list += [\"reverse strand\"]", "linewidth=1) else: plt.plot([start, end], [0.3, 0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates):", "capprops=capprops, showfliers=False) color = None if(not colors is None): color", "right\", default is \"lower right\". :type legend_loc: str, optional :param", "None else plt.gca() patches_dict = {} for segment in segments_list:", "Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome for which to", "cmap = cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm", "start, and TX end of genes. :type genes_bed: :class:`pybedtools.BedTool` :param", "\"R\"): color = (1,1,1,1) rect = Rectangle((segment_start, 0), segment_end-segment_start, 1,", "0 for region in regions: if(not edgecolor): current_color = color", "the arc, defaults to \"k\". :type color: str, optional. 
:param", "matplotlib.patches import Rectangle from matplotlib.patches import Arrow from matplotlib.path import", "exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ ==", "file, for which the first column is a ensemble gene", "= max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color = color) gene_name", "regions: if(not edgecolor): current_color = color rect = Rectangle([int(region[1]), -.75],", "arrow in bp (depends on the region that is plotted),", "If True, make less ticks, else if False make more", "optional :param offset: Length of intervals, defaults to None. :type", "distance_ratio: float, optional :param ax: Axes instance on which the", "continue if(extension is None): extension = float(current_extension) elif(current_extension < extension):", "ploidy_dev = float(interval[3]) tcn = float(interval[4]) # Smooth tcn, if", "= (start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin", "ax. 3. patch_description_list is the list of descriptions for the", "4. Deviation from ploidy, 5. True Copy Number) :type cnvs_bed:", "[int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point =", "if(log_transformed): expression_values = np.log2([i if i >= 1. else 1.", "gene_regions[counter+1][0] bplot_g1_pos = left_border + extension/4. 
bplot_g2_pos = left_border +", "gene_regions[i][0] right_border = None if(i < len(gene_names)-1): right_border = gene_regions[i+1][0]", "medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list", "of original and distance equalized segments n_segments = len(genomic_segments) equalized_region_size", "\"r\". :type color_loss: str, optional :param color_neutral: Plot color of", "position = int(element[1]) if(position < start or position > end):", "max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03), end-start, .06, color=color, capstyle='butt', linewidth=0)", "contact_map_index1 = (start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2]", "= 0 for e in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2", "\"k\". :type color: str, optional :param direction: Direction of distance", "the first column is a ensemble gene id, and the", "is None): heights = [] lefts = [] for i", "plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for i in gene_names_clean]))) for", "max_dist): max_dist = distance mid_point = link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2", "= [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2.,", "= end_r if(direction == \"left\"): TX_start = start_r TX_end =", "scale retaining the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object", "optional :param color_g2: Color used for plotting g2 samples expression,", "points downwards, defaults to top, :type location: str, optional :param", "+= [left_border + extension/2.] 
gene_names_clean += [gene_name] exp_values = expression_df.loc[gene_name,", "<= -1.*cnv_threshold): color = color_loss if(abs(ploidy_dev) > cnv_threshold): rect =", "distance_ratio): '''Function that determines the max y position for gene", "int(signal[1]) end = int(signal[2]) value = float(signal[3]) if(value > max_signal):", "i in ticks ] if(loc_coordinates == \"up\"): plt.plot([start, end], [0,", "position 3. end position 4. Beta Value :type meth_calles: iterator", "ticks = [] current_tick = first_tick while(current_tick <= end): ticks", "= float(interval[4]) # Smooth tcn, if ploidy_dev is smaller than", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5)", "plotted, False otherwise, defaults to True. :type plot_gene_names: bool, optional", "TXstart, and TXend of genes. :type genes_bed: :class:`pybedtools.BedTool` :param region_bed:", "size=6) elif(ploidy == 4): plt.ylim([0, 6.5]) plt.yticks([0, 2, 4, 6],", "else plt.gca() max_signal = 0 left = [] height =", ":type g2_id: str, optional :param plot_gene_names: If True, the HUGO", "plt.xlim([start, end]) def plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\", color=\"k\", ax=None):", "rect = Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt', linewidth=0)", "position of the region to be plotted. :type end_r: int", "whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list =", "e in genomic_segments: if(int(e[1]) < start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2])", "intensity range to be plotted, defaults to None :type vmin:", "TX_pos: Position of the translocation. :type TX_pos: int :param direction:", "to \"#80b1d3\". 
:type color_plus: str, optional :param color_minus: Color of", "genes_sorted_bed: gene_name = interval[3] gene_start = int(interval[1]) gene_end = int(interval[2])", "dict. :param expression_df: class:`pandas.DataFrame` object containing the expression values of", "color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand == \"+\" and not(met_forward)): patch_list", "vmin: float, optional :param vmax: Maximal value of intensity range", "intervals, defaults to None. :type offset: int, optional :param merge:", ":type plot_gene_ids: bool, optional :param y_max: Max y value in", "shown, else the GENE SYMBOLs are hidden. :type plot_gene_names: bool.", "pyramid plots :param contact_map: Matrix that contains the intensity values", "this value is not equal to 0, than merge elements", "patch_description_list = [g1_id, g2_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))", "\"#fb8072\". :type color_g1: str, optional :param color_g2: Color used for", "midpoint of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2.,", "Path.CLOSEPOLY, ] path = Path(vertices, codes) intensity_value = contact_map.iloc[i, j]", "tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick in ax.get_yticklabels(): ytick.set_size(6)", "containing regions of the TF sited to be plotted. :type", "Chromosome of the region to be plotted. 
:type chrom_r: str", ":param expression_df: class:`pandas.DataFrame` object containing the expression values of all", "current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot thresholds", "# Subtract a small percentage of region size from extension", "Color of the bar representing the translocation, defaults to \"k\".", "''' sort_indices = [int(idx) for idx in np.argsort([i[1] for i", "set, optional :param ax: (default: None) Axis used for plotting,", "= None divisor = None if(scale == \"Mb\"): digits_to_round =", "following entries: 1. Chromsome 2. Start position 3. end position", "\"tumor\". :type g1_id: str, optional :param g2_id: ID of g2", "True, a legend describing plus or minus stranded genes is", "for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick in ax.get_yticklabels():", "of the boxplots boxes, defaults to 0.5. :type alpha: float,", "region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\",", ":type start: int :param end: End position of the genomic", "sample ids; index: gene ids) :type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2:", "[] patch_description_list = [] tick_positions = [] gene_names_clean = []", "== 0): # Determine y positions of genes for plotting", "cnv_bed_list += [ [chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"] ]", "to plot, defaults to None. :type max_dev: float, optional :param", "ax if ax is not None else plt.gca() n_entries =", "the IDs of the different groups. 
:type groups: list :param", "codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 1),", "color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list,", "y_max is None): max_y_pos = y_max # Plot Exons for", "to average methylation values, defaults to 1000. :type bin_size: int,", ":class:`pandas.DataFrame` :param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and", "ax is not None else plt.gca() max_signal = 0 left", "if(line[:7] == \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\"", "cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for plotting CNV segments", "''' ax = ax if ax is not None else", "from extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha}", "interval in genes_sorted_bed: gene_name = interval[3] gene_start = int(interval[1]) gene_end", "links_bed: Iterator, that contains bed-like structured lists with the following", "direction: str, optional :param color: Color of the bar representing", "r_end, ax=None, color=\"b\", offset=None, merge=None): '''Function that plots bedGraph like", "= int(i[2]) gene_name = str(i[3]) if(not blacklist is None and", "2. Start position 3. 
End position :type regions: iterator :param", "{\"color\": \"k\", \"linewidth\": .3} flierprops = {\"color\": \"k\"} medianprops =", "color_plus: Color of plus stranded TF regions, defaults to \"#80b1d3\".", "plt.yticks([0, 1, 2, 3, 4], [\"0\", \"1\", \"2\", \"3\", \"4\"],", "input_filename: str :return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq :rtype:", "introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower", "if(current_extension == 0.): continue if(extension is None): extension = float(current_extension)", "matplotlib.ticker as ticker from matplotlib.patches import Rectangle from matplotlib.patches import", "= [] offset = merge*offset left = left_merged height =", ":type edge_color: str, optional :param alpha: Alpha value of the", "blacklist: set, optional :param ax: (default: None) Axis used for", ":rtype: dict ''' ax = ax if ax is not", "= gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color", ":type cnv_threshold: float, optional :param color_gain: Plot color of copy", ":param color: Color of the bar representing the translocation, defaults", "expression_values = np.log2([i if i >= 1. else 1. for", "plots genomic regions as simple rectangles. :param regions: Iterator containig", "break elif(i == max_y_pos): max_y_pos += 1 y_pos_dict[gene_name] = max_y_pos", "determine the color for plotting (R,G,B). 
:type segments_Tabix_filename: str :param", "split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for", "is not None else plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev is", "plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting gene structures,", "= [] gene_names_clean = [] counter=0 for gene_name in gene_names:", "ax: Axis of plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \" for i in gene_names_clean]))) for", "optional :param plot_legend: If True plot legend, False otherwise, defaults", "values patch :rtype: dict ''' ax = ax if ax", "region_size, distance_ratio): '''Function that determines the max y position for", "break else: continue return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that", "open(gene_name_mapping_filename, \"r\") gene_map = {} for line in gene_name_mapping_file: split_line", "= [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id] counter += 1", "if(strand == \"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect", "region to be plotted. :type end: int :param segment_size: Size", "with following entries: 1. Chromosome, 2. Start Position, 3. End", "contact_map_index2-(contact_map_index2-i))) for j in y_range: # Define midpoint of rectangle", "if(not y_max is None): max_y_pos = y_max # Plot Exons", "ids and values = y position \\ of gene. :rtype:", "Path.CURVE3, Path.CURVE3] path = Path(vertices, codes) patch = PathPatch(path, facecolor", "cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm = norm,", "str, optional. 
:return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where 1.", "optional :param max_dev: Maximal deviation from ploidy to plot, defaults", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True, a legend describing", "max([abs(float(i[3])) for i in cnvs_bed]) for interval in cnvs_bed: current_start", "return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups, gene_names_map=None, blacklist=None,", "int(end)], [2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3, 3],", "colors = plt.cm.get_cmap(cmap) if(max_dev is None): max_dev = max([abs(float(i[3])) for", "coordinate strings, defaults to 0. :type rotation: int, optional :return:", "y_max: Max y value in the gene plot. If not", "ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left')", "\"4\", \"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2,", "height_merged += [np.mean(heights)] heights = [] heights += [height[i]] lefts", "gains, defaults to \"g\". :type color_gain: str, optional :param color_loss:", "plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None,", "region to be plotted, defaults to 1. :type alpha: float,", "distance_ratio)): gene_name = str(i[3]) gene_name_label = gene_name if(not gene_map is", "False. :type plot_legend: bool, optional :param legend_loc: Location of the", "minus stranded genes is plotted, False otherwise. 
Default is False.", "color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\", color=color, linewidth=1)", "else if location == \"bottom\" the pyramid points downwards, defaults", "if ax is not None else plt.gca() # Calculate midpoints", ":param color_gain: Plot color of copy number gains, defaults to", "arrow_start = motif_end arrow_end = motif_start color = color_minus dx", ":param region_size: Size of region to be plotted in base", "merged. If this value is not equal to 0, than", "plotted), defaults to 1000. :type head_length: int, optional :param overhang:", "containing TX start, and TX end of genes. :type genes_bed:", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned. :rtype: None", "import tabix import matplotlib.ticker as ticker from matplotlib.patches import Rectangle", "tcn-.2), current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect", "gene plotting strand = str(i[5]) color = color_forward if(strand ==", "(columns: sample ids; index: gene ids). 
:type expression_df: :class:`pandas.DataFrame` :param
:type TX_pos: int :param direction: Direction of the genomic", "region_bed: :class:`pybedtools.BedTool` :param blacklist: List of gene names, for genes", "groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values = list(exp_values) else: exp_values =", "[0.3, 0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks = [ start", "chrom: str :param start: Start position of region to be", "[]) plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function that reads CNVs from", ":type r_end: int :param ax: Axis of plot :type ax:", "TX_start = TX_pos TX_end = end_r if(direction == \"left\"): TX_start", "heights = [] offset = merge*offset left = left_merged height", "position :type regions: iterator :param start: Start position of the", "if ax is not None else plt.gca() c = 0", "else \"NA\" for i in binned_meth_calls ] binned_average_meth_no_missing = []", "[] for signal in chip_signals: start = int(signal[1]) end =", "representing the regions to be plotted, defaults to \"#cbebc4\". :type", "segment in segments_list: segment_start = int(segment[1]) segment_end = int(segment[2]) color", ".3, \"alpha\":alpha} flierprops = {\"color\": \"k\"} medianprops = {\"color\": \"k\",", "ENSEMBL GENE IDs, and values: HUGO GENE SYMBOLs. :type gene_names_map:", "is \"#80b1d3\". :type color_plus: str, optional. :param color_minus: Color code", "to None. Number of ids must be the same as", "to decreasing order. Else, coordinates stay in increasing order, defaults", "start_r: int :param end_r: End position of the region to", "int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward = color_plus", "4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5, 5], color=color_threshold, linestyle=\"--\",", "if ax is not None else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10)))", "\"\"\"Function for plotting gene structures, i.e. 
introns exons of genes.", "else plt.gca() # Calculate midpoints of original and distance equalized", "to be plotted. :type start_r: int :param end_r: Chromosomal end", "HUGO GENE SYMBOLs. :type gene_names_map: dict. :param blacklist: Set containing", "object containing the expression values of all samples (columns: sample", ":type region_bed: :class:`pybedtools.BedTool` :param blacklist: List of gene names, for", "[left[i]] if(not i % merge == 0): left_merged += [lefts[0]]", "(j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0],", "contacts were called. :type segment_size: int :param cmap: Name of", "str(e[3]) if(not gene_names_map is None): gene_names += [gene_names_map[gene_name_ens]] else: gene_names", "5): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4]))", "Path.LINETO] vertices = [(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point,", ":param cmap: Name of the colormap to be used for", "plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([], []) return patches_dict def plotCNVs(cnvs_bed,", "[] current_tick = first_tick while(current_tick <= end): ticks += [current_tick]", "str, optional :param ax: Axis used for plotting. :type ax:", "gene_end = int(interval[2]) for i in range(max_y_pos+1): if(i == 0", "= [] gene_names_clean = [] counter=0 patch_saved = False for", "plt.gca() genes_in_region = genes_bed exons_in_region = exons_bed introns_in_region = introns_bed", "If location == \"top\", the pyramid points upwards, else if", ":param groups: List of lists containing the IDs of the", "region to be plotted ([<chrom>, <start>, <end>]). :type region: list", "plotting, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing", "for i in ticks ] if(loc_coordinates == \"up\"): plt.plot([start, end],", "be merged. If this value is not equal to 0,", "of the genomic scales elements, defaults to \"k\". :type color:", "= float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\" or line[:5] == \"chrom\"):", "for idx in np.argsort([i[1] for i in genes_bed])] genes_sorted_bed =", "3, 4], [\"0\", \"1\", \"2\", \"3\", \"4\"], size=6) elif(ploidy ==", ":type end_r: int :param color: Color of the arc, defaults", "[chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e)", "rect = Rectangle((current_start, .5), current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt', linewidth=0)", "ax if ax is not None else plt.gca() genes_in_region =", "plotting CNVs, defaults to \"bwr\". :type cmap: str, optional :param", "plots methylation values as dot plots. :param meth_calls: Iterator containing", "set, optional :param ax: Axis used for plotting, defaults to", "GENE SYMBOLs. :type gene_names_map: dict. :param expression_df: class:`pandas.DataFrame` object containing", "standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for", "SYMBOLs. :type gene_names_map: dict. :param blacklist: Set containing gene ids", "and converts them to pybedtools.BedTool object :param input_filename: Full path", "0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([],", "edge. 
If False, no edge is plotted, defaults to False.", "int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward = color_plus color_reverse = color_minus", "from ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file = open(input_filename, \"r\") cnv_bed_list", "0 for element in methylation_bed: # Determine bin position =", "contact_map_index1:contact_map_index2] if(vmin is None): vmin = 0 if(vmax is None):", "= [] lefts = [] for i in range(len(left)): if(i", "in ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels) for i in range(len(ticks)):", "= ax if ax is not None else plt.gca() c", "containing list-like elements with the following entries: 1. Chromsome 2.", "if(tcn < -1.*max_dev): tcn = -1.*max_dev elif(tcn > max_dev): tcn", "position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TXstart, and", "\"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax = ax if ax is not", "revert_coordinates: If True, coordinates are reverted to decreasing order. Else,", "y, gene_name_label, size=5, color = color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5])", "np import tabix import matplotlib.ticker as ticker from matplotlib.patches import", "0 else \"NA\" for i in binned_meth_calls ] binned_average_meth_no_missing =", "signal in chip_signals: start = int(signal[1]) end = int(signal[2]) value", "to be plotted as bar :type chip_signals: iterator :param r_chrom:", "ticks.reverse() tick_labels.reverse() print(tick_labels) for i in range(len(ticks)): if(loc_coordinates == \"up\"):", ":param genes_bed: :class:`pybedtools.BedTool` object containing TXstart, and TXend of genes.", "Nothing to be returned :rtype: None ''' ax = ax", "edgecolor=False, alpha=1, ax = None): '''Functions that plots genomic regions", "chrom: Chromosome of the region to be plotted. 
:type chrom:", "in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries == 4):", "plus or minus stranded genes is plotted, False otherwise. Default", "True Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome for", "position of the region to be plotted. :type start: str", "height = [] for signal in chip_signals: start = int(signal[1])", "Minimal distance between two genes, as ratio of ax width,", "+= n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0", ".8]) else: plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False)", "= int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance >", "mapping. :rtype: dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map =", ":param max_dev: Maximal deviation from ploidy to plot, defaults to", ":type chip_signals: iterator :param r_chrom: Chromosome of region to be", "= [] for i in range(1, n_segments+1): equalized_region_mid_points += [((start+", "plotted. 
:type motifs_bed: :class:`pybedtools.BedTool` :param start: Start position of the", ".5), current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)])", "= Path(vertices, codes) path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch)", "Rectangle((segment_start, 0), segment_end-segment_start, 1, color=color) ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start),", "= ax if ax is not None else plt.gca() genes_in_region", "int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) # Determine minimal", "region scale retaining the position of genes. :param genes_bed: :class:`pybedtools.BedTool`", "current_extension = right_border-left_border if(current_extension == 0.): continue if(extension is None):", "for plotting gene structures, i.e. introns exons of genes. :param", "Matrix that contains the intensity values of HiC contacts. :type", "= left_border + 3*(extension/4.) tick_positions += [left_border + extension/2.] gene_names_clean", "patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color = None", "CNV segments :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following", "else: exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__", "= [] height_merged += [np.mean(heights)] heights = [] offset =", "for plotting, non-transformed values otherwise. :type log_transformed: bool, optional :param", "defaults to \"up\". :type loc_coordinates: str, optional :param revert_coordinates: If", ":type colors: str, optional :param ids: IDs used for legend", "ploidy_dev = float(split_line[5])-ploidy chrom = split_line[0] if(chrom == \"23\"): chrom=\"X\"", "angle of coordinate strings, defaults to 0. 
:type rotation: int,", "def createGeneNameMap(gene_name_mapping_filename): '''Function that creates a mapping between gene ids", ".4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list = [] patch_description_list =", "optional :param plot_legend: If True, a legend describing plus or", "i in range(len(gene_regions)): if(not blacklist is None and gene_names[i] in", "be plotted. :type end_r: int :param TX_pos: Position of the", "0) def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\", ax = None):", "top, :type location: str, optional :param ax: Axis on which", "stacked. :type distance_ratio: float :return: Tuple of 1. max_y_pos: Defines", "of region edge. If False, no edge is plotted, defaults", "genes_bed])] genes_sorted_bed = [genes_bed[i] for i in sort_indices] y_pos_dict =", "linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\",", "and regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names = []", "different groups. :type groups: list :param gene_names_map: Dictionary with keys:", "to be plotted. :type end: int :param bin_size: size of", "tcn = -1.*max_dev elif(tcn > max_dev): tcn = max_dev color", ":param y_max: Max y value in the gene plot. If", "ids not to be plotted, defaults to None, :type blacklist:", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary with keys = names of", "to None, :type blacklist: set, optional :param ax: (default: None)", "0)] codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] path = Path(vertices, codes)", "values, defaults to 1000. :type bin_size: int, optional :param ax:", "3. End Position, 4. Deviation from ploidy, 5. 
True Copy", "= [] patch_description_list = [] tick_positions = [] gene_names_clean =", "dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map = {} for", "plot ticks to lower direction, defaults to \"up\". :type loc_coordinates:", "optional. :param color_minus: Color code for minus stranded genes, default", "\"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax = ax", "(1,1,1,1) rect = Rectangle((segment_start, 0), segment_end-segment_start, 1, color=color) ax.add_patch(rect) patches_dict[segment_type]", "elif(tcn > max_dev): tcn = max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev)", "defaults to \"bwr\". :type cmap: str, optional :param max_dev: Maximal", "counter=0 for gene_name in gene_names: left_border = gene_mid_points[counter]-extension/2 right_border =", "plot_legend: bool, optional :param legend_loc: Location of the legend. Either", "ids: list, optional. :param plot_gene_names: True if gene names shall", "if(not edgecolor): current_color = color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]),", "> 0 else \"NA\" for i in binned_meth_calls ] binned_average_meth_no_missing", "is translocated. Either of \"left\" (upstream), or \"right\" (downstream), defaults", "rect = Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt', linewidth=0)", "= ax if ax is not None else plt.gca() contact_map_index1", "rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c", "left\", \"lower right\", \"upper left\", \"upper right\", default is \"lower", "None else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size =", "from ploidy to be considered as a CNV, defaults to", "region to be plotted. 
:type start: int :param end: Chromosomal", "in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect)", "+ extension/4. bplot_g2_pos = left_border + 3*(extension/4.) tick_positions += [left_border", "of max_y_pos+1.5, patch_list, patch_description_list, where 1. max_y_pos+1.5 is the max_y_position", "strand\"] met_forward = True elif(strand == \"-\" and not(met_reverse)): patch_list", "= [int(idx) for idx in np.argsort([i[1] for i in genes_bed])]", "== 0 and not max_y_pos in y_level_dict): y_pos_dict[gene_name] = i", "a ensemble gene id, and the second column is the", "end) if(location == \"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def", "int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] += n_unmeth", "is underwent, the genes will be stacked, default is 0.1.", "upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that plots genomic coordinates in", "to \"k\". :type color: str, optional. :param ax: Axis where", "Chromosomal end position of region to be plotted. :type end:", "of genes for plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up),", "Define color for gene plotting strand = str(i[5]) color =", "on a gene region scale retaining the position of genes.", "> max_signal): max_signal = value if(not offset is None): end", "ax.add_patch(patch) if(strand == \"+\" and not(met_forward)): patch_list += [patch] patch_description_list", "blacklist=None, ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5):", "are reverted to decreasing order. 
Else, coordinates stay in increasing", "] plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id = None if(not ids", "color: Color of the arc, defaults to \"k\". :type color:", "marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2. for", "{} for line in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id =", "region: list :param groups: List of lists containing the IDs", "equalized_region_size/2)] region_mid_points = [] for e in genomic_segments: if(int(e[1]) <", "be plotted. :type start_r: int :param end_r: Chromosomal end positiont", "ax: Axis on which to plot, defaults to None. :type", "([ \" \" for i in gene_names_clean]))) for tick in", "translocated. Either of \"left\" (upstream), or \"right\" (downstream), defaults to", "<start>, <end>]). :type region: list :param groups: List of lists", "object containing exons of genes. :type exons_bed: :class:`pybedtools.BedTool` :param introns_bed:", "for e in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2", "1. else 1. for i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist", "deviation from ploidy to plot, defaults to None. :type max_dev:", "float, optional :param ax: Axis used for plotting, defaults to", "None): max_dev = max([abs(float(i[3])) for i in cnvs_bed]) for interval", "tcn = max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5)", "facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start, end,", "extension/2.] 
gene_names_clean += [gene_name] exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ ==", "g2_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names):", "of region to be plotted. :type end: int :param segment_size:", "Axis on which to plot, defaults to None. :type ax:", "from matplotlib.patches import PathPatch import matplotlib.cm as cm import matplotlib", "boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved):", "expression_values, \"k.\", markersize=3) g_id = None if(not ids is None):", "[2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3, 3], color=color_threshold,", "return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that creates a mapping", "if(not offset is None): end = start + offset left", "[current_tick] current_tick = current_tick + tick_size scale = None if(first_tick", ":type region_size: int :param distance_ratio: Minimal distance between two genes,", "end = start + offset left += [start] height +=", "Following fields must be included: Chrom, Start, End, Methylated Cs,", "for i in sort_indices] y_pos_dict = {} y_level_dict = {}", "stranded TF regions, defaults to \"#80b1d3\". :type color_plus: str, optional", "containing the expression values of all samples (columns: sample ids;", ":param start: Start position of region to be plotted. :type", ".4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]],", "'''Function that plots links between genomic regions as arcs. 
:param", "= -1.*max_dev elif(tcn > max_dev): tcn = max_dev color =", "if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for i in", "cnv_bed_list = [] ploidy = None for line in input_file:", ":type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary with keys: ENSEMBL GENE", "[value] left_merged = [] height_merged = [] if(not merge is", "of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TXstart, and TXend", "legend plotting, defaults to \"tumor\". :type g1_id: str, optional :param", "genomic_segments: list :param start: Start position of the genomic region.", "as pnd import numpy as np import tabix import matplotlib.ticker", "\\ of gene. :rtype: tuple ''' sort_indices = [int(idx) for", "border_distance_down = region_border_down-start if(start < region_border_up): start = region_border_up border_distance_down", "= TX_pos TX_end = end_r if(direction == \"left\"): TX_start =", ":type bin_size: int, optional :param ax: Axis to be used", "genes will be stacked, default is 0.1. :type distance_ratio: float,", "end: int :param head_width: Width of the arrow head as", "column is the HUGO gene name :type gene_name_mapping_file: str :return:", "bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list =", "cmap: Colormap used for plotting CNVs, defaults to \"bwr\". :type", "entries: 1. Chromosome, 2. Start Position, 3. End Position, 4.", "= len(groups) for g in range(n_groups): bplot_pos = left_border +", "and not max_y_pos in y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i] =", "1. max_y_pos+1.5 is the max_y_position + 1.5. 
max_y_pos defines the", "color_neutral=\"k\", ax=None): '''Function for plotting CNV segments :param cnvs_bed: :class:`pybedtools.BedTool`", "+= [value] left_merged = [] height_merged = [] if(not merge", "or position > end): continue n_meth = int(element[3]) n_unmeth =", "region_left_border = int(region_bed[0][1]) # Determine minimal extension of barplot extension=None", "line in input_file: if(line[:7] == \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy)", "not(met_reverse)): patch_list += [patch] patch_description_list += [\"reverse strand\"] met_reverse =", "to None. :type colors: str, optional :param ids: IDs used", "optional :param revert_coordinates: If True, coordinates are reverted to decreasing", "blacklist): counter += 1 continue if(counter < len(gene_names)-1): right_border =", "on the ax. :rtype: list \"\"\" ax = ax if", "file :type input_filename: str :return: :class:`pybedtools.BedTool` object containing CNVs from", "methylated cytosines 5. Number unmethylated cytosines Or 1. Chromsome 2.", "(end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\",", "region edge. If False, no edge is plotted, defaults to", "the region to be plotted. :type start: int :param end:", "gene plotting via function plotGenes. :param genes_bed: :class:`pybedtools.BedTool` object containing", "blacklist is None and gene_name in blacklist): counter += 1", "facecolor = \"None\", edgecolor = color, lw = lw) ax.add_patch(patch)", "plt.ylim([0, 6.5]) plt.yticks([0, 2, 4, 6], [\"0\", \"2\", \"4\", \"6\"],", "str :param start: Start position of the region to be", "color_plus color_reverse = color_minus max_y_pos = None if(not len(genes_in_region) ==", "color_loss: Plot color of copy number losses, defaults to \"r\".", "with the following entries: 1. Chromosome 2. 
Start position 3.", "if(value > max_signal): max_signal = value if(not offset is None):", "midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.),", "import numpy as np import tabix import matplotlib.ticker as ticker", "{} max_y_pos = 0 for interval in genes_sorted_bed: gene_name =", "genomic_segments: List of segments for which distances shall be equalized", "max_dist = 0 for e in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2", "plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color of bars,", "color=\"k\", ax=None): '''Function that plots a translocation event as a", ":type contact_map: :class:`pandas.DataFrame` :param start: Chromosomal start position of region", "position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function for plotting grouped gene expression", "gene_names = [] gene_regions = [] for e in genes_in_region_bed:", "Path.CURVE3] path = Path(vertices, codes) patch = PathPatch(path, facecolor =", "Path to tabixed bed file containing (chrom, start, end, name,", "4. Value to be plotted as bar :type chip_signals: iterator", ":param colors: List of colors used for plotting samples expression.", "make less ticks, else if False make more ticks. 
:type", "color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy == 2): plt.plot([int(start), int(end)], [1,", "in y_range: # Define midpoint of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2.,", "path to ACESeq \"most_important\" file :type input_filename: str :return: :class:`pybedtools.BedTool`", "the rectangles representing the regions to be plotted, defaults to", "in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([],", "optional :param alpha: Alpha value for the background color of", "(link_pos1-link_pos2)/2 vertices = [(link_pos1, 0), (mid_point, distance), (link_pos2, 0)] codes", "= standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3)))", "the different groups. :type groups: list :param gene_names_map: Dictionary with", "Maximal value of intensity range to be plotted, defaults to", "be plotted, defaults to None :type vmin: float, optional :param", "if(loc_coordinates == \"up\"): plt.ylim([-.1, .8]) else: plt.ylim([-1.5, .3]) plt.xticks([], [])", "to 2. :type ploidy: int, optional :param cnv_threshold: Minimal deviation", ":type ids: list, optional. 
:param plot_gene_names: True if gene names", "end_r, lw=1, color=\"k\", ax = None): '''Function that plots links", "Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else:", "region_mid_point = region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes = [] vertices", "linewidth=.5) plt.plot([int(start), int(end)], [6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)])", "boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if", "rect = Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end])", "gene_names: left_border = gene_regions[counter][0] right_border = region_right_border if(not blacklist is", "int, optional :return: Nothing to be returned. 
:rtype: None '''", "keys = names of segments, and values patch :rtype: dict", "= [] if(not merge is None): heights = [] lefts", "contact_map_index2) if location == \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j", "\"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = None patch_description_list", "int :param direction: Direction of the genomic part that is", "cnvs_bed: current_start = int(interval[1]) current_end = int(interval[2]) ploidy_dev = float(interval[3])", "+= 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([", "in range(len(left)): if(i % merge == 0 and not (i", "optional :param ax: Axis to be used for plotting, defaults", "i in introns_in_region: start = int(i[1]) end = int(i[2]) gene_name", "for the background color of the boxplots boxes, defaults to", "Size of the segments for which contacts were called. :type", "= ploidy color = color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev", "meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.) else 0.", "None): '''Function that plots links between genomic regions as arcs.", "of the region to be plotted. 
:type start_r: int :param", "plt.ylim([0, 4.5]) plt.yticks([0, 1, 2, 3, 4], [\"0\", \"1\", \"2\",", "sort_indices] y_pos_dict = {} y_level_dict = {} max_y_pos = 0", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True plot legend, False", ":return: Nothing to be returned :rtype: None ''' ax =", ":class:`pybedtools.BedTool` object containing CNVs from ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file", "= (binned_average_meth[i+1] if not i == len(binned_average_meth)-1 else \"NA\") average_list", "color: Color of lines equalizing distances, defaults to \"k\". :type", "widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color =", "for i in range(1, n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)]", "tabix import matplotlib.ticker as ticker from matplotlib.patches import Rectangle from", "[] counter=0 for gene_name in gene_names: left_border = gene_mid_points[counter]-extension/2 right_border", "in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend):", "binned_average_meth = binned_average_meth_no_missing # Plot average methylation values per bin", "colors used for plotting samples expression. The number of colors", "y_pos_dict[gene_name] = i y_level_dict[i] = [[gene_start, gene_end]] break elif(gene_start >", "defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary with", "color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list = [] patch_description_list = []", "str, optional :param max_dev: Maximal deviation from ploidy to plot,", "r_start, r_end, ax=None, color=\"b\", offset=None, merge=None): '''Function that plots bedGraph", "arrow_start = motif_start arrow_end = motif_end color=color_plus dx = head_length", "Defaults to 0. :type overhang: float, optional :param color_plus: Color", "underwent, the genes will be stacked, default is 0.1. :type", "representing methylation values, defaults to \"k\". :type color: str, optional", "for legend plotting, defaults to \"normal\". :type g2_id: str, optional", "for m in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) ==", "float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.) else 0. for m in", "of region to be plotted. :type r_end: int :param ax:", "[\"0\", \"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start,", "end of genes. :type genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object", "the regions to be plotted, defaults to \"#cbebc4\". :type color:", "plotting ax = ax if ax is not None else", "on a gene region scale equalizing the position of genes.", "as dot plots. :param meth_calls: Iterator containing list-like elements with", "cmap=\"Greys\", vmin=None, vmax=None, location=\"top\", ax=None): '''Function that plots HiC contact", "str(motif[3]) arrow_start = motif_start arrow_end = motif_end color=color_plus dx =", "to \"k\". :type color_neutral: str, optional :param ax: Axis used", "plotted. :type genes_bed: :class:`pybedtools.BedTool` :param region_size: Size of region to", "copy number neutral regions, defaults to \"k\". 
:type color_neutral: str,", "patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id]", "to \"#fb8072\". :type color_minus: str, optional :param ax: Axis on", "color field is used to determine the color for plotting", "in different colors :param segments_tabix_filename: Path to tabixed bed file", "is 0.1. :type distance_ratio: float, optional :param ax: Axes instance", "tcn, if ploidy_dev is smaller than cnv_threshold if(abs(ploidy_dev) < cnv_threshold):", "for i in range(len(left)): if(i % merge == 0 and", "= color_forward if(strand == \"-\"): color = color_reverse border_distance_down =", "Chromosome for which to plot CNVs. :type chromosome: str :param", "containing center positions of genes. :type gene_mid_points: list :param region:", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary with keys =", "bedGraph like iterators. :param chip_signals: Iterator for which each element", "optional :param y_max: Max y value in the gene plot.", "color = color, edgecolor = color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed,", "genes. 2. patch_list is the list of patches drawn on", "ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function", "= left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border + extension/2.] gene_names_clean", "to \"up\". :type loc_coordinates: str, optional :param revert_coordinates: If True,", "\"k\". :type color_neutral: str, optional :param ax: Axis used for", "= color) gene_name = str(i[3]) gene_name_label = gene_name if(not gene_map", "in blacklist): counter += 1 continue if(counter < len(gene_names)-1): right_border", ":type alpha: float, optional :return: Plots axis. 
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot` '''", "Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1])", "bplot_g1 = ax.boxplot([np.log2([i if i >= 1. else 1. for", "region_mid_points = [] for e in genomic_segments: if(int(e[1]) < start):", ":class:`pybedtools.BedTool` object containing exons of genes. :type exons_bed: :class:`pybedtools.BedTool` :param", "c += 1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([], []) plt.xlim([start, end])", "average methylation values, defaults to 1000. :type bin_size: int, optional", "Dictionary with keys: ENSEMBL GENE IDs, and values: HUGO GENE", "will be stacked, default is 0.1. :type distance_ratio: float, optional", "True # Plot Gene Names if(plot_gene_ids): for i in genes_in_region:", "a bar, showing the part of the genome that is", "0): # Determine y positions of genes for plotting max_y_pos,", "is the max_y_position + 1.5. max_y_pos defines the \\ number", "ratio of ax width, such that two genes are plotted", "ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list,", "if(ploidy == 2): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5)", ":param upper: If True, make less ticks, else if False", "n = len(binned_average_meth) for i in range(n): if(not binned_average_meth[i] ==", "= [] for e in genes_in_region_bed: gene_name_ens = str(e[3]) gene_names", "patch_description_list += [\"forward strand\"] met_forward = True elif(strand == \"-\"", "else plt.gca() for motif in motifs_bed: motif_start = int(motif[1]) motif_end", "lists with the following elements: 1. Chromosome region1 2. 
Start", "= \"None\", edgecolor = color, lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False)", "gene_name in gene_names: left_border = gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not", "to be plotted :type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing", "and values patch :rtype: dict ''' ax = ax if", "None. :type vmax: float, optional :param location: Either of \"top\"", "than one. Defaults to 0. :type overhang: float, optional :param", "met_reverse = True # Plot Gene Names if(plot_gene_ids): for i", "= exons_bed introns_in_region = introns_bed region_border_up = int(region_bed[0][1]) region_border_down =", "continue split_line = line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom = split_line[0]", "color: color of bars, defaults to \"b\". :type color: str,", "patch = PathPatch(path, facecolor = \"None\", edgecolor = color, lw", "plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if", "int :param TX_pos: Position of the translocation. :type TX_pos: int", "position of the region to be plotted. :type end: str", "TF motifs as arrows, indicating their directionality. :param motifs_bed: :class:`pybedtools.BedTool`", "color_minus max_y_pos = None if(not len(genes_in_region) == 0): # Determine", "plotted, defaults to \"#cbebc4\". :type color: str, optional :param edge_color:", "None. :type colors: str, optional :param ids: IDs used for", "calls. 
Following fields must be included: Chrom, Start, End, Methylated", "and distance equalized segments n_segments = len(genomic_segments) equalized_region_size = (end-start)", "if(distance > max_dist): max_dist = distance mid_point = link_pos1 +", "(equalized_region_mid_point, 1)] path = Path(vertices, codes) path_patch = PathPatch(path, facecolor=\"none\",", "[(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else: codes", "i in exons_in_region: start = int(i[1]) end = int(i[2]) gene_name", "head_width: Width of the arrow head as proportion of the", "color = color_forward if(strand == \"-\"): color = color_reverse border_distance_down", "bars, defaults to \"b\". :type color: str, optional :param offset:", "m in m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5)", "Path(vertices, codes) intensity_value = contact_map.iloc[i, j] intensity_value = (intensity_value/vmax if", "of integer values containing center positions of genes. :type gene_mid_points:", "ids; index: gene ids) :type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe`", "plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start, end]) def plotTX(chrom_r, start_r, end_r,", "link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point = link_pos2 +", "] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]), from_string=True) def", "with the following elements: 1. Chromosome region1 2. Start region1", ":type segment_size: int :param cmap: Name of the colormap to", "whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color = None if(not colors is None):", "plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start, end,", "of bars, defaults to \"b\". 
:type color: str, optional :param", "= (1,1,1,1) rect = Rectangle((segment_start, 0), segment_end-segment_start, 1, color=color) ax.add_patch(rect)", "[] n = len(binned_average_meth) for i in range(n): if(not binned_average_meth[i]", "ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left')", "ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \" for", "= region_right_border if(not blacklist is None and gene_name in blacklist):", ":param start: Start position of the genomic region. :type start:", "to \"top_down\". :type direction: str, optional. :param ax: Axis on", ":param links_bed: Iterator, that contains bed-like structured lists with the", "= introns_bed region_border_up = int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size =", "gene plot. If not set, then y_max is the max", "Position of the translocation. :type TX_pos: int :param direction: Direction", "color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for plotting CNV segments :param", "then y_max is the max number of stacked genes, default", "n_unmeth = int(element[4]) current_bin = int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0]", "region size from extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\", \"linewidth\":", "end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False, alpha=1,", "ax is not None else plt.gca() binned_meth_calls = [ [0,", "patch_description_list is the list of descriptions for the patches \\", "to be plotted. :type chrom: str :param start: Start position", "of the region to be plotted. 
:type end: int :param", "if not j == \"NA\" ] binned_average_meth_no_missing += [ (float(sum(average_list))/", "optional :param color: color of bars, defaults to \"b\". :type", "the legend. Either of \"lower left\", \"lower right\", \"upper left\",", "[ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ] counter =", "[] ploidy = None for line in input_file: if(line[:7] ==", "g1 samples (columns: sample ids; index: gene ids) :type expression_df_g1:", "pyramid points upwards, else if location == \"bottom\" the pyramid", "plotted, default to None. :type blacklist: set, optional :param ax:", "[] heights += [height[i]] lefts += [left[i]] if(not i %", "(chrom, start, end, name, score, strand, start, end, color). The", "elements will be averaged an plotted, defaults to 0. :type", "binned_meth_calls ] binned_average_meth_no_missing = [] n = len(binned_average_meth) for i", "of region to be plotted. :type chrom: str :param start:", "ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([], [])", "return ax def plotGenomicSegments(segments_list, chrom, start, end, ax = None):", "no points are plotted otherwise, defaults to False. :type plot_points:", "Number of elements to be merged. If this value is", "exp_values_g1 = expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 = list(exp_values_g1)", "y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location == \"top\" else range(contact_map_index1,", "else the GENE SYMBOLs are hidden. :type plot_gene_names: bool. 
:return:", "must be included: Chrom, Start, End, Methylated Cs, Unmethylated Cs.", "for i in exons_in_region: start = int(i[1]) end = int(i[2])", ":type cmap: str, optional :param vmin: Minimal value of intensity", "color_g1: str, optional :param color_g2: Color used for plotting g2", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True plot legend, False otherwise,", "CNVs from ACESeq (\"*most_important*\") files and converts them to pybedtools.BedTool", "= [[gene_start, gene_end]] break else: continue return max_y_pos, y_pos_dict def", "= None tick_positions = [] gene_names_clean = [] counter=0 patch_saved", "= matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm = norm, cmap =", "tuple ''' sort_indices = [int(idx) for idx in np.argsort([i[1] for", "1000000): scale = \"Mb\" else: scale=\"Kb\" digits_to_round = None divisor", "else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i in region]), from_string=True)", "index: gene ids). :type expression_df: class:`pandas.DataFrame` :param blacklist: Set containing", "gene ids) :type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary with keys:", "standard_colors = [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"]", "strand\"] met_reverse = True # Plot Gene Names if(plot_gene_ids): for", "True, make less ticks, else if False make more ticks.", "counter += 1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth", "ticks += [current_tick] current_tick = current_tick + tick_size scale =", "equalized segments n_segments = len(genomic_segments) equalized_region_size = (end-start) if(n_segments >", "int :param end: End position of the genomic region. 
:type", "= int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward =", "(end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for i", "color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect = Rectangle((current_start, tcn-.1),", "= norm, cmap = cmap) for cbin in range(len(binned_average_meth)): rect", "is None): vmin = 0 if(vmax is None): vmax =", "head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that plots TF", "= i y_level_dict[i] = [[gene_start, gene_end]] break elif(gene_start > y_level_dict[i][-1][1]", "linewidth=0) ax.add_patch(patch) if(strand == \"+\" and not(met_forward)): patch_list += [patch]", "plot_gene_names: bool. :return: Axis on which plot was placed. :rtype:", "max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect =", "int :param head_width: Width of the arrow head as proportion", "\"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]), from_string=True)", "start: int :param end: End position on chromosome. :type end:", "object containing genes to be plotted. :type genes_bed: :class:`pybedtools.BedTool` :param", "expression_values = exp_values if(log_transformed): expression_values = np.log2([i if i >=", "position of region to be plotted. 
:type end: int :param", "List containing the region to be plotted ([<chrom>, <start>, <end>]).", "== \"Series\"): exp_values_g2 = list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0, :])", "= {} for segment in segments_list: segment_start = int(segment[1]) segment_end", "end]) plt.ylim([0, 1]) return equalized_region_mid_points def plotCoordinates(chrom, start, end, color=\"k\",", "= color_loss if(abs(ploidy_dev) > cnv_threshold): rect = Rectangle((current_start, tcn-.2), current_end-current_start,", "bool, optional :param loc_coordinates: Either of \"up\" | \"down\". If", "linewidth=0) ax.add_patch(rect) patch_list = [] patch_description_list = [] met_forward =", ":type start: int :param end: End position of region to", "Position, 4. Deviation from ploidy, 5. True Copy Number) :type", "= ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None): vmin", "= ax if ax is not None else plt.gca() n_entries", "position 3. end position 4. Number methylated cytosines 5. Number", "False otherwise, default is True :type plot_gene_ids: bool, optional :param", "the gene plot. If not set, then y_max is the", "patch_description_list, fontsize=5, loc='lower left') return ax def plotGenomicSegments(segments_list, chrom, start,", "genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object containing exons of genes.", "left_border = gene_regions[i][0] right_border = None if(i < len(gene_names)-1): right_border", "decreasing order. Else, coordinates stay in increasing order, defaults to", "methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome of region to be plotted.", "Size of region to be plotted in base pairs. :type", "Chromosomal end positiont of the region to be plotted. :type", "Unmethylated Cs. 
:type methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome of region", "for m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries", "[bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id] counter += 1 ax.set_xlim(region_left_border,", "is plotted, False otherwise. Default is False. :type plot_legend: bool,", "defaults to 1000. :type bin_size: int, optional :param ax: Axis", "= int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4]) if(tcn <", "fashion. :param chrom: Chromosome of the region to be plotted.", "segment_size: int :param cmap: Name of the colormap to be", "((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\", markersize=3)", "genes. :type genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object containing exons", "optional :param cmap: Colormap used for plotting CNVs, defaults to", "bed-like structured lists with the following elements: 1. Chromosome region1", ":param blacklist: List of gene names, for genes that should", "2. Start postion 3. End position 4. Value to be", "y_max is the max number of stacked genes, default is", "((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None): vmin =", "vmin=None, vmax=None, location=\"top\", ax=None): '''Function that plots HiC contact maps", "cmap: str, optional :param max_dev: Maximal deviation from ploidy to", "(midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO,", "to be plotted. 
:type chrom_r: str :param start_r: Chromosomal start", "intensity_value = contact_map.iloc[i, j] intensity_value = (intensity_value/vmax if intensity_value <=", "otherwise, default is True :type plot_gene_ids: bool, optional :param y_max:", "None else plt.gca() for interval in cnvs_bed: current_start = int(interval[1])", "\"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start, end,", "plot_legend: If True legend is plotted, False otherwise, defaults to", "None else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i in region]),", "\"upper right\", default is \"lower right\". :type legend_loc: str, optional", "Color used for plotting g1 samples expression, defaults to \"#fb8072\".", "else: current_color = color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5,", "barplot extension=None for i in range(len(gene_regions)): if(not blacklist is None", ":param color_minus: Color of plus stranded TF regions, defaults to", "head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map,", "start: Start position on chromosome. 
:type start: int :param end:", "patch_list is the list of patches drawn on the ax.", "digits_to_round))+scale for i in ticks ] if(loc_coordinates == \"up\"): plt.plot([start,", "if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([", "color = color_forward if(strand == \"-\"): color = color_reverse y", "= gene_regions[i+1][0] else: right_border = region_right_border current_extension = right_border-left_border if(current_extension", "plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i in region]), from_string=True) #", "ax is not None else plt.gca() for motif in motifs_bed:", ".8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else: codes = [Path.MOVETO, Path.LINETO,", "the number of stacked genes. 2. y_pos_dict: Dictionary with keys", "of the genomic region. :type start: int :param end: End", "str, optional :param direction: Direction of distance equalization (top_down |", "tumor, defaults to 2. :type ploidy: int, optional :param cnv_threshold:", "ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1,", "n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1]))", "region2 5. Start region2 6. End region2 :type links_bed: iterator", "in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops,", "TX start, and TX end of genes. :type genes_bed: :class:`pybedtools.BedTool`", "sample ids; index: gene ids). 
:type expression_df: class:`pandas.DataFrame` :param blacklist:", "# Determine first tick position first_tick = start+(tick_size-start%tick_size) ticks =", "2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3, 3], color=color_threshold, linestyle=\"--\",", "arrows, indicating their directionality. :param motifs_bed: :class:`pybedtools.BedTool` object containing regions", ":param alpha: Alpha value for the background color of the", "gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1,", "the form [<chrom>, <start>, <end>]) :type genomic_segments: list :param start:", ":class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors = [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\",", "head_length if(strand == \"-\"): arrow_start = motif_end arrow_end = motif_start", "None. :type offset: int, optional :param merge: Number of elements", "plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start, end, segment_size, cmap=\"Greys\",", "values as dot plots. :param meth_calls: Iterator containing list-like elements", "gene structures, i.e. introns exons of genes. :param genes_bed: :class:`pybedtools.BedTool`", "optional. :param ax: Axis on which to plot, defaults to", "to False. :type plot_points: bool, optional :param alpha: Alpha value", ":type color_minus: str, optional :param ax: Axis on which to", "motif_end = int(motif[2]) strand = str(motif[3]) arrow_start = motif_start arrow_end", "ids; index: gene ids). 
:type expression_df: class:`pandas.DataFrame` :param blacklist: Set", "linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else:", "0), segment_end-segment_start, 1, color=color) ax.add_patch(rect) patches_dict[segment_type] = rect plt.xlim(int(start), int(end))", "= gene_regions[counter+1][0] bplot_g1_pos = left_border + extension/4. bplot_g2_pos = left_border", "Color of region edge. If False, no edge is plotted,", "(each segment is of the form [<chrom>, <start>, <end>]) :type", "region, for which the gene plot is created. :type region_bed:", "in genes_in_region_bed: gene_name_ens = str(e[3]) if(not gene_names_map is None): gene_names", "codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] path = Path(vertices, codes) patch", "ax if ax is not None else plt.gca() for interval", "the region to be plotted. :type chrom_r: str :param start_r:", ":type segments_Tabix_filename: str :param chrom: Chromosome of the region to", "genomic regions as simple rectangles. :param regions: Iterator containig list-like", ":rtype: None ''' # Use given axis for plotting ax", "float(interval[3]) tcn = float(interval[4]) if(tcn < -1.*max_dev): tcn = -1.*max_dev", "be plotted. :type end: int :param head_width: Width of the", "range(n_groups): bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border +", "ax = None): '''Function for plotting genomix segments in different", "defaults to \"k\". :type color: str, optional. :param ax: Axis", "plotted. :type start: int :param end: End position of region", "= cmap) for cbin in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0),", "intensities, defaults to \"Greys\". 
:type cmap: str, optional :param vmin:", "\"23\"): chrom=\"X\" elif(chrom == \"24\"): chrom = \"Y\" cnv_bed_list +=", "digits_to_round = int(6-np.log10(tick_size)) divisor = 1000000 else: digits_to_round = int(5-np.log10(tick_size))", "6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy == 2): plt.ylim([0,", "list :param region: List containing the region to be plotted", ":param color: Color of lines equalizing distances, defaults to \"k\".", "patch :rtype: dict ''' ax = ax if ax is", "bin_size=1000, ax = None): '''Function for plotting methylation values as", "= [ start + end-i for i in ticks ]", "= False # Plot Introns for i in introns_in_region: start", "Path(vertices, codes) path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\")", "patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id] counter +=", "right_border = region_right_border if(not blacklist is None and gene_name in", "= [] current_tick = first_tick while(current_tick <= end): ticks +=", "to plot contact map, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "region_border_down = int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward = color_plus color_reverse", ":return: Dictionary containing the gene id mapping. :rtype: dictionary '''", "not None else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size", "descriptions for the patches \\ drawn on the ax. :rtype:", "horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3, .0],", "if(ploidy == 2): plt.ylim([0, 4.5]) plt.yticks([0, 1, 2, 3, 4],", "start position of region to be plotted. :type start: int", "genes, default is \"#80b1d3\". :type color_plus: str, optional. 
:param color_minus:", ":return: Dictionary with keys = names of segments, and values", "color: str, optional. :param ax: Axis where the plot is", "arrow is swept back (0 overhang means triangular shape). Can", "plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5,", "= determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not y_max is None): max_y_pos", "plotting, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary", "alpha=alpha) c += 1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([], []) plt.xlim([start,", "\"linewidth\": .3} patch_list = [] patch_description_list = [] tick_positions =", "'''Function for plotting methylation values as heatmap :param methylation_bed: Methylation", ":type head_width: float, optional :param head_length: Length of the arrow", "smaller than cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn = ploidy color", "legend_loc: str, optional :param color_plus: Color code for plus stranded", ":type motifs_bed: :class:`pybedtools.BedTool` :param start: Start position of the region", "range to be plotted, defaults to None :type vmin: float,", "if(start < region_border_up): start = region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size)", "Axis on which plot was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax", "the genomic region. :type end: int :param color: Color of", "[] lefts = [] for i in range(len(left)): if(i %", "in exons_in_region: start = int(i[1]) end = int(i[2]) gene_name =", "blacklist: set, optional :param ax: Axis used for plotting, defaults", "the region to be plotted. :type chrom: str :param start:", "region_right_border if(not blacklist is None and gene_name in blacklist): counter", "for plotting, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param", ":class:`pybedtools.BedTool` :param blacklist: List of gene names, for genes that", "file containing (chrom, start, end, name, score, strand, start, end,", "# Calculate midpoints of original and distance equalized segments n_segments", "segment is of the form [<chrom>, <start>, <end>]) :type genomic_segments:", "the following entries: 1. Chromosome 2. Start position 3. End", "y-.03), end-start, .06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand == \"+\"", "([<chrom>, <start>, <end>]). :type region: list :param groups: List of", "+= [g_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"):", "== \"#\" or line[:5] == \"chrom\"): continue split_line = line.rstrip().split(\"\\t\")", "2. Start region1 3. End region1 4. Chromosome region2 5.", "y-.2), end-start, .4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list = []", "(start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is", "distances of genomic segments to equal distances. :param genomic_segments: List", "plotting g2 samples expression, defaults to \"#80b1d3\". :type color_g2: str,", "True legend is plotted, False otherwise, defaults to False. :type", "exons_in_region: start = int(i[1]) end = int(i[2]) gene_name = str(i[3])", "= gene_mid_points[counter]+extension/2 if(not blacklist is None and gene_name in blacklist):", "normal) on a gene region scale equalizing the position of", "be plotted. :type end: int :param bin_size: size of bin", "end, direction=\"top_down\", color=\"k\", ax = None): '''Function that plots arcs", "+ extension/2.] gene_names_clean += [gene_name] exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__", "region that is plotted), defaults to 1000. 
:type head_length: int,", "= color_reverse border_distance_down = region_border_down-start if(start < region_border_up): start =", "genes. :type gene_mid_points: list :param region: List containing the region", "4): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "to \"k\". :type color: str, optional :param direction: Direction of", "Chromsome 2. Start position 3. end position 4. Beta Value", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True plot legend,", "End position of region to be plotted. :type r_end: int", "be shown on the plot, default is None :type blacklist:", "minus stranded genes, default is \"#fb8072\". :type color_minus: str, optional.", "is None): color = colors[g] else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color)", "the GENE SYMBOLs are hidden. :type plot_gene_names: bool. :return: Axis", "unequal distances of genomic segments to equal distances. :param genomic_segments:", "plt.gca() max_dist = 0 for e in links_bed: link_pos1 =", "default is \"#80b1d3\". :type color_plus: str, optional. :param color_minus: Color", "is not None else plt.gca() patches_dict = {} for segment", "elif(ploidy == 4): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5)", "int(region_bed[0][1]) # Determine minimal extension of barplot extension=None if(len(gene_mid_points) <=", "position_gene_names: Either of \"top\", or \"bottom\", defaults to \"bottom\". :type", "1 continue n_groups = len(groups) for g in range(n_groups): bplot_pos", "patch = Rectangle((start, y-.03), end-start, .06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch)", "not i == len(binned_average_meth)-1 else \"NA\") average_list = [ j", "in base pairs. :type region_size: int :param distance_ratio: Minimal distance", "defaults to \"#cbebc4\". 
:type color: str, optional :param edge_color: Color", "size=5, color = color) gene_name = str(i[3]) gene_name_label = gene_name", "mid_point = link_pos2 + (link_pos1-link_pos2)/2 vertices = [(link_pos1, 0), (mid_point,", "\"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops = {\"color\": \"k\"} medianprops =", "[\"reverse strand\"] met_reverse = True # Plot Gene Names if(plot_gene_ids):", "\"k.\", markersize=3) g_id = None if(not ids is None): g_id", "of bin to average methylation values, defaults to 1000. :type", ":param g2_id: ID of g2 used for legend plotting, defaults", "Path(vertices, codes) patch = PathPatch(path, facecolor = \"None\", edgecolor =", "plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None,", "link_pos2 + (link_pos1-link_pos2)/2 vertices = [(link_pos1, 0), (mid_point, distance), (link_pos2,", ":param merge: Number of elements to be merged. If this", "motif in motifs_bed: motif_start = int(motif[1]) motif_end = int(motif[2]) strand", "linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start,", "<end>]) :type genomic_segments: list :param start: Start position of the", "containing TXstart, and TXend of genes. 
:type genes_bed: :class:`pybedtools.BedTool` :param", "str, optional :param revert_coordinates: If True, coordinates are reverted to", "= expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values = list(exp_values) else:", "optional :param ids: IDs used for legend plotting, defaults to", "= pybedtools.BedTool(\"\\t\".join([str(i) for i in region]), from_string=True) # Get gene", "str, optional :param color_loss: Plot color of copy number losses,", "plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i],", "(link_pos2, 0)] codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] path = Path(vertices,", "background color of the boxplots boxes, defaults to 0.5. :type", "cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None, color=\"b\", offset=None,", "If True, all gene ids will be included in the", "or line[:5] == \"chrom\"): continue split_line = line.rstrip().split(\"\\t\") ploidy_dev =", "and not(met_forward)): patch_list += [patch] patch_description_list += [\"forward strand\"] met_forward", "len(average_list) > 0 else 0. ] binned_average_meth = binned_average_meth_no_missing #", "current_color = color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color,", "be plotted. :type r_start: int :param r_end: End position of", "samples (columns: sample ids; index: gene ids). :type expression_df: class:`pandas.DataFrame`", "values for plotting, non-transformed values otherwise. :type log_transformed: bool, optional", "end: int :param bin_size: size of bin to average methylation", "ax=None): '''Function that plots TF motifs as arrows, indicating their", "to upper direction, else if \"down\", plot ticks to lower", "cnv_threshold): tcn = ploidy color = color_neutral if(ploidy_dev >= cnv_threshold):", "plotted. 
:type start: int :param end: Chromosomal end position of", ":type blacklist: list, optional :param plot_gene_ids: If True, all gene", "colormap to be used for plotting HiC intensities, defaults to", "= {\"color\": \"k\"} medianprops = {\"color\": \"k\", \"linewidth\": .3} whiskerprops", "if(scale == \"Mb\"): digits_to_round = int(6-np.log10(tick_size)) divisor = 1000000 else:", "capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = None patch_description_list = None", "of the segments for which contacts were called. :type segment_size:", "2. :type ploidy: int, optional :param cnv_threshold: Minimal deviation from", "+= [start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points", ":param ax: Axis on which to plot, defaults to None.", "len(meth_calls[0]) if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls", "edgecolor = color, lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False)", "str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]] region_right_border =", "Path from matplotlib.patches import PathPatch import matplotlib.cm as cm import", "If True, the HUGO GENE SYMBOLs will be shown, else", "offset, color = color, edgecolor = color) plt.xlim(r_start, r_end) def", "of colors must be the same as the number of", "\"lower right\". :type legend_loc: str, optional :param color_plus: Color code", "of genes. 
:param genes_bed: :class:`pybedtools.BedTool` object containing TX start, and", "i in genes_in_region: start = int(i[1]) gene_name = str(i[3]) if(not", "location=\"top\", ax=None): '''Function that plots HiC contact maps as pyramid", "\"+\" and not(met_forward)): patch_list += [patch] patch_description_list += [\"forward strand\"]", "plotting CNV segments as heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object containing", "genes. :type exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object containing introns", "color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that plots TF motifs as arrows,", "matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap)", "of genes. :type genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object containing", "containing genes to be plotted. :type genes_bed: :class:`pybedtools.BedTool` :param region_size:", "TX_pos TX_end = end_r if(direction == \"left\"): TX_start = start_r", "== 0.): continue if(extension is None): extension = float(current_extension) elif(current_extension", "0)] else: codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices =", "(binned_average_meth[i-1] if not i == 0 else \"NA\") meth_after =", ":class:`pybedtools.BedTool` :param gene_mid_points: list of integer values containing center positions", "rectangles representing the regions to be plotted, defaults to \"#cbebc4\".", "defaults to False. 
:type edge_color: str, optional :param alpha: Alpha", "for ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower", "start_r: int :param end_r: Chromosomal end positiont of the region", "import matplotlib.cm as cm import matplotlib import tabix import math", "not None else plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev is None):", "elif(int(e[2]) > end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2]", "legend describing plus or minus stranded genes is plotted, False", "patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list += [g_id] counter += 1", "capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0],", "to be plotted. :type motifs_bed: :class:`pybedtools.BedTool` :param start: Start position", ":class:`pandas.Dataframe` containing the expression values of g2 samples (columns: sample", "max_dev = max([abs(float(i[3])) for i in cnvs_bed]) for interval in", "patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that determines the max", "If this value is not equal to 0, than merge", "in input_file: if(line[:7] == \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0]", "hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return gene_map def", "meth_after = (binned_average_meth[i+1] if not i == len(binned_average_meth)-1 else \"NA\")", "in a linea fashion. :param chrom: Chromosome of the region", "their directionality. 
:param motifs_bed: :class:`pybedtools.BedTool` object containing regions of the", "of barplot extension=None if(len(gene_mid_points) <= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] #", "to be returned :rtype: None ''' # Use given axis", "1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2, 2], color=color_threshold, linestyle=\"--\",", "offset: Length of intervals, defaults to None. :type offset: int,", "Start position of the region to be plotted. :type start_r:", "ids :param gene_name_mapping_file: Path to a tab separated file, for", "color_plus: str, optional :param color_minus: Color of plus stranded TF", "for which to plot CNVs. :type chromosome: str :param start:", "marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start, end])", "color_gain: str, optional :param color_loss: Plot color of copy number", ":rtype: tuple ''' sort_indices = [int(idx) for idx in np.argsort([i[1]", "for cbin in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0), bin_size, 1,", "distance between two genes, as ratio of ax width, such", "is True :type plot_gene_ids: bool, optional :param y_max: Max y", "\"bottom\", defaults to \"bottom\". :type position_gene_names: str, optional :param log_transformed:", "Plots axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors = [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\",", "for interval in genes_sorted_bed: gene_name = interval[3] gene_start = int(interval[1])", "max number of stacked genes, default is None. 
:type y_max:", "the list of descriptions for the patches \\ drawn on", "'''Function for plotting CNV segments as heatmap :param cnvs_bed: :class:`pybedtools.BedTool`", "If not set, then y_max is the max number of", "= None if(not ids is None): g_id = ids[g] else:", "plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function that", "region_border_down-region_border_up color_forward = color_plus color_reverse = color_minus max_y_pos = None", ":type color_plus: str, optional :param color_minus: Color of plus stranded", "the region to be plotted. :type start_r: int :param end_r:", "edgecolor='none', alpha=alpha) c += 1 else: current_color = color rect", "genes, default is \"#fb8072\". :type color_minus: str, optional. :return: Tuple", ":type start: int :param end: End position on chromosome. :type", "> 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for i in range(1,", "ploidy to plot, defaults to None. :type max_dev: float, optional", "False. :type edge_color: str, optional :param alpha: Alpha value of", "== \"up\"): plt.ylim([-.1, .8]) else: plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False)", "region to be plotted. :type r_chrom: str :param r_start: Start", "if ax is not None else plt.gca() n_entries = len(meth_calls[0])", "color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation)", "i in cnvs_bed]) for interval in cnvs_bed: current_start = int(interval[1])", "is None): max_dev = max([abs(float(i[3])) for i in cnvs_bed]) for", "= Rectangle((segment_start, 0), segment_end-segment_start, 1, color=color) ax.add_patch(rect) patches_dict[segment_type] = rect", "non-transformed values otherwise. 
:type log_transformed: bool, optional :param plot_points: If", "chrom_r, start_r, end_r, lw=1, color=\"k\", ax = None): '''Function that", "gene_end]] break else: continue return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function", "int :param color: Color of the arc, defaults to \"k\".", "plots HiC contact maps as pyramid plots :param contact_map: Matrix", "scale = \"Mb\" else: scale=\"Kb\" digits_to_round = None divisor =", "that plots genomic coordinates in a linea fashion. :param chrom:", "upper: If True, make less ticks, else if False make", "be plotted. :type start: str :param end: End position of", "capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot thresholds color_threshold=(189./255., 189./255., 189./255., 0.5)", "not None else plt.gca() TX_start = TX_pos TX_end = end_r", ":param plot_legend: If True plot legend, False otherwise, defaults to", "None): end = start + offset left += [start] height", "bin_size: int, optional :param ax: Axis to be used for", "< link_pos2): mid_point = link_pos2 + (link_pos1-link_pos2)/2 vertices = [(link_pos1,", "Either of \"top\", or \"bottom\", defaults to \"bottom\". :type position_gene_names:", "Chromosome of the region to be plotted. :type chrom: str", "binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ]", "be plotted. :type chrom_r: str :param start_r: Start position of", "defaults to None. Number of ids must be the same", ":class:`pybedtools.BedTool` object containing gene regions. :type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points:", "if(chrom == \"23\"): chrom=\"X\" elif(chrom == \"24\"): chrom = \"Y\"", "False make more ticks. :type upper: bool, optional :param loc_coordinates:", "increasing order, defaults to False. 
:type revert_coordinates: bool, optional :param", "[]) plt.xlim([start, end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start, end, head_width=0.2,", ":type start_r: int :param end_r: End position of the region", "in meth_calls ], [ float(m[4]) for m in m in", "list :param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and", "gene_map[gene_name] in blacklist): continue # Define color for gene plotting", "start = int(i[1]) gene_name = str(i[3]) if(not blacklist is None", "matplotlib.patches import Arrow from matplotlib.path import Path from matplotlib.patches import", "gene_name_ens = str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]]", "# Plot thresholds color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy == 2):", "contact_map: :class:`pandas.DataFrame` :param start: Chromosomal start position of region to", "lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([], [])", "to equal distances. :param genomic_segments: List of segments for which", "i y_level_dict[i] = [[gene_start, gene_end]] break elif(gene_start > y_level_dict[i][-1][1] and", "optional :return: Nothing to be returned. :rtype: None ''' ax", "not(float(m[3])+float(m[4]) == 0.) else 0. for m in meth_calls], color=color,", "to \"normal\". 
:type g2_id: str, optional :param plot_gene_names: If True,", ":type chrom: str :param start: Start position of the region", "plots arcs from unequal distances of genomic segments to equal", "in gene_names: left_border = gene_regions[counter][0] right_border = region_right_border if(not blacklist", "cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome for which to plot CNVs.", "optional :param vmin: Minimal value of intensity range to be", "for genes that should not be shown on the plot,", "max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not y_max is", "plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list,", ":param color: Color of the arc, defaults to \"k\". :type", "groups: list :param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs,", "be plotted. :type end_r: int :param color: Color of the", "bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops,", ":param r_start: Start position of region to be plotted. :type", "showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]]", "def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that determines the max y", "not None else plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2 = ((end)/segment_size)+1", "regions of the TF sited to be plotted. 
:type motifs_bed:", "for i in range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]], [0.,", "str :param chrom: Chromosome of the region to be plotted.", "[start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points +=", "gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick in", "to None. :type blacklist: set, optional :param ax: Axis used", ":] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 = list(exp_values_g1) else: exp_values_g1 =", "[height[i]] lefts += [left[i]] if(not i % merge == 0):", "import matplotlib import tabix import math def plotGenes(genes_bed, exons_bed, introns_bed,", "str, optional. :param color_minus: Color code for minus stranded genes,", "else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\", ax", "= y position \\ of gene. :rtype: tuple ''' sort_indices", "bool, optional :param y_max: Max y value in the gene", "is None): end = start + offset left += [start]", "bool :param color_g1: Color used for plotting g1 samples expression,", "int(interval[1]) current_end = int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4])", "color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000, ax", ":param TX_pos: Position of the translocation. :type TX_pos: int :param", "the same as the number of groups, defaults to None.", "max_y_position + 1.5. 
max_y_pos defines the \\ number of stacked", "= ax if ax is not None else plt.gca() for", "fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\", color=color,", "plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r,", "end: int :param ploidy: Assumed ploidy of tumor, defaults to", "motifs_bed: motif_start = int(motif[1]) motif_end = int(motif[2]) strand = str(motif[3])", "= color_plus color_reverse = color_minus max_y_pos = None if(not len(genes_in_region)", "flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True", ":type regions: iterator :param start: Start position of the region", "containig list-like elements with the following entries: 1. Chromosome 2.", "end, ax = None): '''Function for plotting genomix segments in", "gene_end]] break elif(i == max_y_pos): max_y_pos += 1 y_pos_dict[gene_name] =", "if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def plotGeneExpressionEqualDist(genes_bed,", "alpha=1, ax = None): '''Functions that plots genomic regions as", "facecolor=current_color, edgecolor='none', alpha=alpha) c += 1 else: current_color = color", "codes) intensity_value = contact_map.iloc[i, j] intensity_value = (intensity_value/vmax if intensity_value", "y positions of genes for plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region,", "keys = gene ids and values = y position \\", "by side. 
If this ratio is undercut, the genes will
:type segment_size: int :param", "[] height = [] for signal in chip_signals: start =", "End position 4. Value to be plotted as bar :type", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned :rtype: None '''", "of \"up\" | \"down\". If \"up\", plot ticks to upper", "the arrow is swept back (0 overhang means triangular shape).", "Maximal deviation from ploidy to plot, defaults to None. :type", "= split_line[0] if(chrom == \"23\"): chrom=\"X\" elif(chrom == \"24\"): chrom", "extension = float(current_extension) boxprops = {\"color\": \"k\", \"linewidth\": .3} flierprops", "gene_regions[counter][0] right_border = region_right_border if(not blacklist is None and gene_name", "> cnv_threshold): rect = Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color, edgecolor='none',", "end]) plt.yticks([], []) if(loc_coordinates == \"up\"): plt.ylim([-.1, .8]) else: plt.ylim([-1.5,", "list(exp_values.iloc[0, :]) expression_values = exp_values if(log_transformed): expression_values = np.log2([i if", "if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \" for i in gene_names_clean])))", "= int(interval[1]) gene_end = int(interval[2]) for i in range(max_y_pos+1): if(i", "'''Function for plotting paired gene expression (e.g. tumor and normal)", "of points representing methylation values, defaults to \"k\". :type color:", "not None else plt.gca() for interval in cnvs_bed: current_start =", "Tuple of max_y_pos+1.5, patch_list, patch_description_list, where 1. max_y_pos+1.5 is the", "= False for gene_name in gene_names: left_border = gene_regions[counter][0] right_border", "List of lists containing the IDs of the different groups.", "\"k\", \"linewidth\": .3} patch_list = [] patch_description_list = [] tick_positions", ":param exons_bed: :class:`pybedtools.BedTool` object containing exons of genes. 
:type exons_bed:", "plotted, defaults to None :type vmin: float, optional :param vmax:", "alpha=alpha) c += 1 else: current_color = color rect =", "blacklist: Set containing gene ids not to be plotted, defaults", ":param chrom: Chromosome of region to be plotted. :type chrom:", "field is used to determine the color for plotting (R,G,B).", "which the gene plot is created. :type region_bed: :class:`pybedtools.BedTool` :param", ":type end_r: int :param TX_pos: Position of the translocation. :type", "i in range(len(left)): if(i % merge == 0 and not", ".2), (equalized_region_mid_point, 0)] else: codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO]", "for j in [meth_before, meth_after] if not j == \"NA\"", "describing plus or minus stranded genes is plotted, False otherwise.", ":type blacklist: set, optional :param ax: (default: None) Axis used", "gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border = int(region_bed[0][1])", "\"#fb8072\". :type color_minus: str, optional :param ax: Axis on which", ":type offset: int, optional :param merge: Number of elements to", "distance equalized segments n_segments = len(genomic_segments) equalized_region_size = (end-start) if(n_segments", "Fraction that the arrow is swept back (0 overhang means", "4, 6], [\"0\", \"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed,", "None if(first_tick > 1000000): scale = \"Mb\" else: scale=\"Kb\" digits_to_round", "color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions,", "for which contacts were called. :type segment_size: int :param cmap:", "is False. :type plot_legend: bool, optional :param legend_loc: Location of", "\"left\" (upstream), or \"right\" (downstream), defaults to \"left\". 
:type direction:", ":type introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the one", "tick.set_size(5) for ytick in ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5,", "int(end)], [3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4],", "an plotted, defaults to 0. :type merge: int, optional :return:", "genes. :param genes_bed: :class:`pybedtools.BedTool` object containing gene regions. :type genes_bed:", "# Determine bin position = int(element[1]) if(position < start or", "color_g1: Color used for plotting g1 samples expression, defaults to", "vmin: Minimal value of intensity range to be plotted, defaults", "else plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev is None): max_dev =", "end position 4. Beta Value :type meth_calles: iterator :param chrom:", ":class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool` object containing exons of genes. :type", "[]) plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\", ax=None):", "3. End region1 4. Chromosome region2 5. Start region2 6.", "for i in range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i]", ":type loc_coordinates: str, optional :param revert_coordinates: If True, coordinates are", "bin_size: size of bin to average methylation values, defaults to", "else \"NA\") meth_after = (binned_average_meth[i+1] if not i == len(binned_average_meth)-1", "where the plot is drawn, defaults to None. :type ax:", "be plotted. 
:type end: int :param color: Color of points", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If True, make less ticks, else", ":type end: int :param color: Color of the rectangles representing", "log_transformed=True, plot_points=False, alpha=.5): '''Function for plotting grouped gene expression (e.g.", "color: Color of the bar representing the translocation, defaults to", "0 if(vmax is None): vmax = np.percentile(contact_map, 99.9) colormap =", "scales elements, defaults to \"k\". :type color: str, optional :param", "object containing introns :type introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object", "in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.) else", "color of copy number gains, defaults to \"g\". :type color_gain:", "patches drawn on the ax. 3. patch_description_list is the list", "== \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\" or", "None, :type blacklist: set, optional :param ax: (default: None) Axis", "used to determine the color for plotting (R,G,B). :type segments_Tabix_filename:", "TF sited to be plotted. :type motifs_bed: :class:`pybedtools.BedTool` :param start:", "the genomic region. :type start: int :param end: End position", "for i in binned_meth_calls ] binned_average_meth_no_missing = [] n =", "for e in genes_in_region_bed: gene_name_ens = str(e[3]) if(not gene_names_map is", "vmax = np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap) for i in", "region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes = [] vertices = []", "gene_name = str(i[3]) gene_name_label = gene_name if(not gene_map is None):", "= 10**int((np.log10((end-start)/10))) # Determine first tick position first_tick = start+(tick_size-start%tick_size)", "one. Defaults to 0. :type overhang: float, optional :param color_plus:", "Color of the arc, defaults to \"k\". 
:type color: str,", "= distance mid_point = link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2):", "(columns: sample ids; index: gene ids) :type expression_df_g1: :class:`pandas.DataFrame` :param", "a translocation event as a bar, showing the part of", "[ j for j in [meth_before, meth_after] if not j", "int :param cmap: Name of the colormap to be used", "methylation values, defaults to \"k\". :type color: str, optional :param", "= max([abs(float(i[3])) for i in cnvs_bed]) for interval in cnvs_bed:", "ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1]) return equalized_region_mid_points def plotCoordinates(chrom, start,", "float, optional :param color_plus: Color of plus stranded TF regions,", "= None if(not len(genes_in_region) == 0): # Determine y positions", ":type color: str, optional :param edge_color: Color of region edge.", "region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name = str(i[3]) gene_name_label = gene_name", "for i in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6)", "midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes = [Path.MOVETO, Path.LINETO,", "Chromosome of region to be plotted. :type chrom: str :param", "plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end])", "region_size: int :param distance_ratio: Minimal distance between two genes, as", ":type plot_legend: bool, optional :param legend_loc: Location of the legend.", "'''Function that plots genomic coordinates in a linea fashion. 
:param", "region, expression_df, groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None, ids=None, plot_gene_names=True,", "patch_list += [patch] patch_description_list += [\"reverse strand\"] met_reverse = True", "ax = None): '''Function for plotting methylation values as heatmap", "on the ax. 3. patch_description_list is the list of descriptions", "of gene. :rtype: tuple ''' sort_indices = [int(idx) for idx", "= motif_start arrow_end = motif_end color=color_plus dx = head_length if(strand", "Start position of the region to be plotted. :type start:", "representing the translocation, defaults to \"k\". :type color: str, optional", "ax.get_yticklabels(): ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax", "range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in y_range: # Define midpoint of", "end: End position of the region to be plotted. :type", "region2 :type links_bed: iterator :param chrom_r: Chromosome of the region", "= float(interval[4]) if(tcn < -1.*max_dev): tcn = -1.*max_dev elif(tcn >", "expression (e.g. tumor and normal) on a gene region scale", "optional :param ax: (default: None) Axis used for plotting, defaults", "list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id = None if(not", "plotGenes. :param genes_bed: :class:`pybedtools.BedTool` object containing genes to be plotted.", "of the bar representing the translocation, defaults to \"k\". :type", ":type groups: list :param gene_names_map: Dictionary with keys: ENSEMBL GENE", ":class:`pybedtools.BedTool` object containing TX start, and TX end of genes.", "i y_level_dict[i] += [[gene_start, gene_end]] break elif(i == max_y_pos): max_y_pos", "= Path(vertices, codes) intensity_value = contact_map.iloc[i, j] intensity_value = (intensity_value/vmax", "defaults to False. 
:type revert_coordinates: bool, optional :param rotation: Rotational", "else: plt.plot([start, end], [0.3, 0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks", ":type revert_coordinates: bool, optional :param rotation: Rotational angle of coordinate", "gene_start = int(interval[1]) gene_end = int(interval[2]) for i in range(max_y_pos+1):", "used for legend plotting, defaults to \"normal\". :type g2_id: str,", "<gh_stars>0 import matplotlib.pyplot as plt import pybedtools import pandas as", "returned. :rtype: None ''' # Use given axis for plotting", "defaults to 0.2 :type head_width: float, optional :param head_length: Length", "gene_mid_points: list :param region: List containing the region to be", "otherwise, defaults to False. :type plot_legend: bool :param color_g1: Color", "Minimal deviation from ploidy to be considered as a CNV,", "ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1, color=\"k\", ax =", ".3} whiskerprops = {\"color\": \"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\":", "np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap) for i in range(contact_map_index1, contact_map_index2):", "ax=None): '''Function that plots methylation values as dot plots. :param", "None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that plots genomic coordinates", "], [ float(m[4]) for m in m in meth_calls], color=color,", ":param chromosome: Chromosome for which to plot CNVs. :type chromosome:", ":]) bplot_g1 = ax.boxplot([np.log2([i if i >= 1. else 1.", "blacklist is None and gene_map[gene_name] in blacklist): continue # Define", "location == \"bottom\" the pyramid points downwards, defaults to top,", "in blacklist): continue left_border = gene_regions[i][0] right_border = None if(i", "color_minus=\"#fb8072\"): \"\"\"Function for plotting gene structures, i.e. 
introns exons of", "0] for i in range(int(((end-start)/bin_size)+1)) ] counter = 0 for", "= int(element[3]) n_unmeth = int(element[4]) current_bin = int((position-start)/bin_size) counter +=", ":type color_neutral: str, optional :param ax: Axis used for plotting.", "which the first column is a ensemble gene id, and", "= ax if ax is not None else plt.gca() #", "= lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([],", "\"NA\") average_list = [ j for j in [meth_before, meth_after]", "color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color = color_loss if(abs(ploidy_dev) > cnv_threshold):", "segments_Tabix_filename: str :param chrom: Chromosome of the region to be", "to be plotted, default to None. :type blacklist: set, optional", "gene_name = interval[3] gene_start = int(interval[1]) gene_end = int(interval[2]) for", "be plotted, defaults to \"#cbebc4\". :type color: str, optional :param", "plt.gca() for motif in motifs_bed: motif_start = int(motif[1]) motif_end =", "meth_calls: Iterator containing list-like elements with the following entries: 1.", "m in meth_calls ], [ float(m[4]) for m in m", "return max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that", "+= 1 continue n_groups = len(groups) for g in range(n_groups):", "right_border = region_right_border current_extension = right_border-left_border if(current_extension == 0.): continue", "1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([],", "\"k\", \"linewidth\": .3} whiskerprops = {\"color\": \"k\", \"linewidth\": .3} capprops={\"color\":", "plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id = None if(not ids is", "genomic scales elements, defaults to \"k\". 
:type color: str, optional", "\"k\", \"linewidth\": .3} flierprops = {\"color\": \"k\"} medianprops = {\"color\":", "Color of lines equalizing distances, defaults to \"k\". :type color:", "as a CNV, defaults to 0.7. :type cnv_threshold: float, optional", "\" \" for i in gene_names_clean]))) for tick in ax.get_xticklabels():", "plot_legend: bool :param color_g1: Color used for plotting g1 samples", "ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \"", "[] for e in genes_in_region_bed: gene_name_ens = str(e[3]) if(not gene_names_map", "in genomic_segments: if(int(e[1]) < start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) >", "exons of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TX start,", "linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1]) return equalized_region_mid_points def", "matplotlib.path import Path from matplotlib.patches import PathPatch import matplotlib.cm as", "Either of \"left\" (upstream), or \"right\" (downstream), defaults to \"left\".", "alpha=.5): '''Function for plotting grouped gene expression (e.g. tumor and", "of the region to be plotted. :type start: int :param", "color=\"k\", ax = None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that", "to be plotted. :type end: int :param segment_size: Size of", "plotted otherwise, defaults to False. :type plot_points: bool, optional :param", "patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i", "plotted. :type end: int :param segment_size: Size of the segments", "for the patches \\ drawn on the ax. 
:rtype: list", "float(interval[3]) tcn = float(interval[4]) # Smooth tcn, if ploidy_dev is", "str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3]) if(segment_type == \"R\"): color =", "patches_dict def plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\",", "None if(i < len(gene_names)-1): right_border = gene_regions[i+1][0] else: right_border =", "None else plt.gca() max_signal = 0 left = [] height", "extension=None if(len(gene_mid_points) <= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a", "Color used for plotting g2 samples expression, defaults to \"#80b1d3\".", "= color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000,", "the plot is drawn, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "= len(genomic_segments) equalized_region_size = (end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points", "True, a point per expression value is plotted in addition", "is not None else plt.gca() c = 0 for region", "number of groups. :type ids: list, optional. :param plot_gene_names: True", "| \"bottom\". If location == \"top\", the pyramid points upwards,", "= int(5-np.log10(tick_size)) divisor = 100000 tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale", "linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start, end], [0.3, 0.3], linestyle=\"-\", color=color,", "position_gene_names: str, optional :param log_transformed: If True use log transformed", "else plt.gca() binned_meth_calls = [ [0, 0] for i in", "end): ticks += [current_tick] current_tick = current_tick + tick_size scale", "iterators. 
:param chip_signals: Iterator for which each element is a", "plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None):", "ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax", "genes to be plotted. :type genes_bed: :class:`pybedtools.BedTool` :param region_size: Size", "head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4,", "\"top\", or \"bottom\", defaults to \"bottom\". :type position_gene_names: str, optional", "input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals,", "\"Mb\" else: scale=\"Kb\" digits_to_round = None divisor = None if(scale", "chrom = split_line[0] if(chrom == \"23\"): chrom=\"X\" elif(chrom == \"24\"):", "indicating their directionality. :param motifs_bed: :class:`pybedtools.BedTool` object containing regions of", "cnv_threshold): color=colors(.5) rect = Rectangle((current_start, .5), current_end-current_start, 1, color=color, edgecolor='none',", "plt.ylim([-1.5, .3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed,", "cmap = cmap) for cbin in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size,", "5. Number unmethylated cytosines Or 1. Chromsome 2. 
Start position", "str, optional :param color: Color of the bar representing the", "0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks = [ start +", "split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed, region_bed,", ":type cmap: str, optional :param max_dev: Maximal deviation from ploidy", "as proportion of the arrow, defaults to 0.2 :type head_width:", "Subtract a small percentage of region size from extension extension=extension-(region[2]-region[1])*.01", "Arrow from matplotlib.path import Path from matplotlib.patches import PathPatch import", "will be shown, else the GENE SYMBOLs are hidden. :type", "= int(segment[2]) color = tuple([ float(i)/256. for i in str(segment[-1]).split(\",\")", "different colors :param segments_tabix_filename: Path to tabixed bed file containing", "be plotted :type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing the", "expression_df: class:`pandas.DataFrame` object containing the expression values of all samples", "(2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name] exp_values", "\"+str(g) if(not g_id in patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list +=", "axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors = [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\",", "\"left\". 
:type direction: str, optional :param color: Color of the", "upper): tick_size = 10**int((np.log10((end-start)/10))) # Determine first tick position first_tick", "+= [ [chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"] ] input_file.close()", "\"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax = ax if", "= max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03), end-start, .06, color=color, capstyle='butt',", "patch_saved = False for gene_name in gene_names: left_border = gene_regions[counter][0]", "\\ number of stacked genes. 2. patch_list is the list", "plotting genomix segments in different colors :param segments_tabix_filename: Path to", "gene_map = {} for line in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\")", "if(not gene_map is None): gene_name_label = gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8", "y = max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03), end-start, .06, color=color,", "bplot_g1_pos = left_border + extension/4. bplot_g2_pos = left_border + 3*(extension/4.)", ".3]) plt.xticks([], []) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r,", "that plots links between genomic regions as arcs. :param links_bed:", "to be plotted ([<chrom>, <start>, <end>]). 
:type region: list :param", "is None and gene_names[i] in blacklist): continue left_border = gene_regions[i][0]", "patch_description_list = [] tick_positions = [] gene_names_clean = [] counter=0", "0.7]) def plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax =", "= color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list,", "distance_ratio) if(not y_max is None): max_y_pos = y_max # Plot", "float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0 else \"NA\" for i in", "genes for plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio)", "arc, defaults to \"k\". :type color: str, optional. :param ax:", "of \"lower left\", \"lower right\", \"upper left\", \"upper right\", default", "plot_gene_names: If True, the HUGO GENE SYMBOLs will be shown,", "averaged an plotted, defaults to 0. :type merge: int, optional", "default is None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If", "with keys = names of segments, and values patch :rtype:", "exons_bed introns_in_region = introns_bed region_border_up = int(region_bed[0][1]) region_border_down = int(region_bed[0][2])", "on chromosome. :type start: int :param end: End position on", "None. :type blacklist: set, optional :param ax: Axis used for", "to be plotted. :type r_chrom: str :param r_start: Start position", "else plt.gca() patches_dict = {} for segment in segments_list: segment_start", "\"#fb8072\". :type color_minus: str, optional. 
:return: Tuple of max_y_pos+1.5, patch_list,", "optional :param plot_gene_names: If True, the HUGO GENE SYMBOLs will", "None else plt.gca() n_entries = len(meth_calls[0]) if(n_entries == 5): plt.plot([", "plt.get_cmap(cmap) for i in range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2)", "ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting gene", "0), (mid_point, distance), (link_pos2, 0)] codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]", "size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\",", "chrom, start, end, bin_size=1000, ax = None): '''Function for plotting", "cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color = color_loss if(abs(ploidy_dev) >", "else: digits_to_round = int(5-np.log10(tick_size)) divisor = 100000 tick_labels = [", ":return: Tuple of 1. max_y_pos: Defines the number of stacked", "be plotted. :type start: int :param end: Chromosomal end position", "region2 6. End region2 :type links_bed: iterator :param chrom_r: Chromosome", "to 1. :type alpha: float, optional. :param ax: Axis of", ":param end_r: End position of the region to be plotted.", "them to pybedtools.BedTool object :param input_filename: Full path to ACESeq", "max_y_pos = 0 for interval in genes_sorted_bed: gene_name = interval[3]", "color = color) gene_name = str(i[3]) gene_name_label = gene_name if(not", "translocation, defaults to \"k\". :type color: str, optional :param ax:", "ensemble gene id, and the second column is the HUGO", "Can be negative or greater than one. Defaults to 0.", "or \"bottom\", defaults to \"bottom\". 
:type position_gene_names: str, optional :param", "max_y_pos = None if(not len(genes_in_region) == 0): # Determine y", "plt.plot([int(start), int(end)], [3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4,", "region_size = region_border_down-region_border_up color_forward = color_plus color_reverse = color_minus max_y_pos", "of the rectangle, representing the region to be plotted, defaults", "be included in the plot, False otherwise, default is True", "gene_name_label = gene_name if(not gene_map is None): gene_name_label = gene_map[gene_name]", "equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points = [] for e", "g_id = \"group \"+str(g) if(not g_id in patch_description_list): patch_list +=", ":type genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the region", "tabixed bed file containing (chrom, start, end, name, score, strand,", "def readACESeqAsBed(input_filename): '''Function that reads CNVs from ACESeq (\"*most_important*\") files", "> end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for", "plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2,", "start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for", "in addition to the boxplot, no points are plotted otherwise,", "= (end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points = [] for", ":type meth_calles: iterator :param chrom: Chromosome of region to be", "int(5-np.log10(tick_size)) divisor = 100000 tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale for", "colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') 
ax.add_patch(patch) ax.set_xlim(start, end) if(location", "Axis used for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing", ":param start_r: Chromosomal start position of the region to be", ".06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand == \"+\" and not(met_forward)):", "None if(not len(genes_in_region) == 0): # Determine y positions of", "for i in range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if", "end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function for plotting CNV", "= {\"color\": \"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list", "code for minus stranded genes, default is \"#fb8072\". :type color_minus:", "optional :param log_transformed: If True use log transformed values for", "binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0 else \"NA\"", "should not be shown on the plot, default is None", "optional :param color_gain: Plot color of copy number gains, defaults", "of stacked genes, default is None. :type y_max: bool, optional", "counter += 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(", "== \"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch =", "gene_name in gene_names: left_border = gene_regions[counter][0] right_border = region_right_border if(not", "not None else plt.gca() # Get gene names and regions", ".3} flierprops = {\"color\": \"k\"} medianprops = {\"color\": \"k\", \"linewidth\":", "intensity range to be plotted, defaults to None. 
:type vmax:", "matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap) for cbin in range(len(binned_average_meth)):", "List of equalized region midpoints. :rtype: list ''' ax =", "not None else plt.gca() max_signal = 0 left = []", "link_pos2): mid_point = link_pos2 + (link_pos1-link_pos2)/2 vertices = [(link_pos1, 0),", "if(first_tick > 1000000): scale = \"Mb\" else: scale=\"Kb\" digits_to_round =", "None and gene_name in blacklist): counter += 1 continue n_groups", "plotting grouped gene expression (e.g. tumor and normal) on a", "+= [binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1] if not i ==", "= [g1_id, g2_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))", "\"#bbbbbb\"] ax = ax if ax is not None else", "2. Start Position, 3. End Position, 4. Deviation from ploidy,", ":type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome for which to plot", "reads CNVs from ACESeq (\"*most_important*\") files and converts them to", "the genes will be stacked, default is 0.1. :type distance_ratio:", "expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True):", "plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy == 4):", "strand = str(i[5]) color = color_forward if(strand == \"-\"): color", ":param cmap: Colormap used for plotting CNVs, defaults to \"bwr\".", "not (i == 0)): left_merged += [lefts[0]] lefts = []", "is swept back (0 overhang means triangular shape). Can be", "of region to be plotted. 
:type start: int :param end:", "1), (region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else: codes =", "= [] met_forward = False met_reverse = False # Plot", "intensity_value = (intensity_value/vmax if intensity_value <= vmax else 1.) facecolor", "if(not blacklist is None and gene_map[gene_name] in blacklist): continue #", "in introns_in_region: start = int(i[1]) end = int(i[2]) gene_name =", "ax.add_patch(rect) plt.xticks([], []) plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1, 1]) def", "edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1]) return equalized_region_mid_points", "not None else plt.gca() for motif in motifs_bed: motif_start =", "List of gene names, for genes that should not be", "1]) return equalized_region_mid_points def plotCoordinates(chrom, start, end, color=\"k\", ax =", "edge is plotted, defaults to False. :type edge_color: str, optional", "path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end])", "1. max_y_pos: Defines the number of stacked genes. 2. 
y_pos_dict:", "] counter = 0 for element in methylation_bed: # Determine", ":class:`pybedtools.BedTool` :param start: Start position of the region to be", "i in range(1, n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points", "extension=None for i in range(len(gene_regions)): if(not blacklist is None and", "= [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax", "= color, lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False)", "< cnv_threshold): tcn = ploidy color = color_neutral if(ploidy_dev >=", "]+[1]) segment_type = str(segment[3]) if(segment_type == \"R\"): color = (1,1,1,1)", "cmap: Name of the colormap to be used for plotting", "(i == 0)): left_merged += [lefts[0]] lefts = [] height_merged", "losses, defaults to \"r\". :type color_loss: str, optional :param color_neutral:", "position on chromosome. :type start: int :param end: End position", "e in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance", "edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end) if(location == \"top\"): ax.set_ylim(0, (end-start)/2.) else:", "== \"R\"): color = (1,1,1,1) rect = Rectangle((segment_start, 0), segment_end-segment_start,", "plt.bar(left, height, offset, color = color, edgecolor = color) plt.xlim(r_start,", "] binned_average_meth_no_missing = [] n = len(binned_average_meth) for i in", "lw=1, color=\"k\", ax = None): '''Function that plots links between", ":type position_gene_names: str, optional :param log_transformed: If True use log", "capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([],", "Axis used for plotting, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "None. :type y_max: bool, optional :param distance_ratio: Minimal distance between", ":]) exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 =", "1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1, 1])", "is underwent, the genes will be stacked. :type distance_ratio: float", "str, optional :param alpha: Alpha value of the rectangle, representing", "start, end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax = None): '''Functions that", "end: int :param color: Color of points representing methylation values,", "for j in y_range: # Define midpoint of rectangle midpoint", "= [(link_pos1, 0), (mid_point, distance), (link_pos2, 0)] codes = [Path.MOVETO,", ":param plot_points: If True, a point per expression value is", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If True, make less ticks,", "int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5, 5],", ":type ploidy: int, optional :param cnv_threshold: Minimal deviation from ploidy", "1. Chromsome 2. Start position 3. end position 4. Number", "sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None): vmin = 0", "as cm import matplotlib import tabix import math def plotGenes(genes_bed,", ":param ax: Axis to be used for plotting, defaults to", "End position of the genomic region. :type end: int :param", "Else, coordinates stay in increasing order, defaults to False. :type", "int(6-np.log10(tick_size)) divisor = 1000000 else: digits_to_round = int(5-np.log10(tick_size)) divisor =", ":type chrom_r: str :param start_r: Chromosomal start position of the", "of the different groups. :type groups: list :param gene_names_map: Dictionary", "plus stranded TF regions, defaults to \"#fb8072\". 
:type color_minus: str,", "if gene names shall be plotted, False otherwise, defaults to", "motifs as arrows, indicating their directionality. :param motifs_bed: :class:`pybedtools.BedTool` object", ":param start: Chromosomal start position of region to be plotted.", "size=5, color = color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], [])", "Chromosomal start position of region to be plotted. :type start:", "ID of g1 used for legend plotting, defaults to \"tumor\".", "stranded genes is plotted, False otherwise. Default is False. :type", "shown on the plot, default is None :type blacklist: list,", "== 0): left_merged += [lefts[0]] lefts = [] height_merged +=", "to \"bottom\". :type position_gene_names: str, optional :param log_transformed: If True", "cmap) for cbin in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0), bin_size,", "the region to be plotted. :type start: str :param end:", "int :param ax: Axis of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color = None if(not colors is", "genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names = [] gene_regions =", "== \"bottom\" the pyramid points downwards, defaults to top, :type", "\" for i in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45)", "g2_id: str, optional :param plot_gene_names: If True, the HUGO GENE", "in sort_indices] y_pos_dict = {} y_level_dict = {} max_y_pos =", "will be averaged an plotted, defaults to 0. :type merge:", ":param meth_calls: Iterator containing list-like elements with the following entries:", "defaults to None. 
:type colors: str, optional :param ids: IDs", "gene name :type gene_name_mapping_file: str :return: Dictionary containing the gene", "if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name = str(i[3]) gene_name_label = gene_name if(not", "int(i[2]) gene_name = str(i[3]) if(not blacklist is None and gene_map[gene_name]", "heatmap :param methylation_bed: Methylation calls. Following fields must be included:", "as arcs. :param links_bed: Iterator, that contains bed-like structured lists", ":param color: Color of points representing methylation values, defaults to", "for line in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0]", "[ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.) else 0. for m", "counter += 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))", "end_r: Chromosomal end positiont of the region to be plotted.", "and not(met_reverse)): patch_list += [patch] patch_description_list += [\"reverse strand\"] met_reverse", "to be plotted. 
:type end_r: int :param TX_pos: Position of", "r_end: int :param ax: Axis of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "(float(i[0])+float(i[1])) > 0 else \"NA\" for i in binned_meth_calls ]", "average_list = [ j for j in [meth_before, meth_after] if", "if(type(exp_values).__name__ == \"Series\"): exp_values = list(exp_values) else: exp_values = list(exp_values.iloc[0,", "patches_dict = {} for segment in segments_list: segment_start = int(segment[1])", "< len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos = left_border + extension/4.", "not None else plt.gca() patches_dict = {} for segment in", "in m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0,", "width, such that two genes are plotted side by side.", "values: HUGO GENE SYMBOLs. :type gene_names_map: dict. :param blacklist: Set", "\"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1] if not", "else plt.gca() for interval in cnvs_bed: current_start = int(interval[1]) current_end", "i in ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels) for i in", "[] patch_description_list = [] met_forward = False met_reverse = False", "= interval[3] gene_start = int(interval[1]) gene_end = int(interval[2]) for i", "if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before =", "max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2), end-start, .4, color=color, capstyle='butt', linewidth=0)", "Color code for plus stranded genes, default is \"#80b1d3\". :type", "as simple rectangles. 
:param regions: Iterator containig list-like elements with", "from matplotlib.patches import Arrow from matplotlib.path import Path from matplotlib.patches", "== 0)): left_merged += [lefts[0]] lefts = [] height_merged +=", "= int(signal[2]) value = float(signal[3]) if(value > max_signal): max_signal =", "import matplotlib.ticker as ticker from matplotlib.patches import Rectangle from matplotlib.patches", "ratio is underwent, the genes will be stacked. :type distance_ratio:", "for segment in segments_list: segment_start = int(segment[1]) segment_end = int(segment[2])", "introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the one region,", "\"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax = ax if ax is", "defaults to 2. :type ploidy: int, optional :param cnv_threshold: Minimal", "= list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"):", "float, optional :param head_length: Length of the arrow in bp", "is not None else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for i", "of \"left\" (upstream), or \"right\" (downstream), defaults to \"left\". :type", "= True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2)", ":param overhang: Fraction that the arrow is swept back (0", "gene_names_clean = [] counter=0 patch_saved = False for gene_name in", "ax=None): '''Function that plots HiC contact maps as pyramid plots", "= list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i if i >= 1.", ":param plot_gene_names: True if gene names shall be plotted, False", "Number unmethylated cytosines Or 1. Chromsome 2. 
Start position 3.", "np.argsort([i[1] for i in genes_bed])] genes_sorted_bed = [genes_bed[i] for i", "end): continue n_meth = int(element[3]) n_unmeth = int(element[4]) current_bin =", "index: gene ids) :type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing", "plot_gene_names: bool, optional :param position_gene_names: Either of \"top\", or \"bottom\",", "ax if ax is not None else plt.gca() # Get", "0.): continue if(extension is None): extension = float(current_extension) elif(current_extension <", "plt.plot([int(start), int(end)], [5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6,", "region1 2. Start region1 3. End region1 4. Chromosome region2", ":type r_chrom: str :param r_start: Start position of region to", "= gene ids and values = y position \\ of", ":param color: Color of the genomic scales elements, defaults to", "plot ticks to upper direction, else if \"down\", plot ticks", "expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs,", "plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function that reads CNVs from ACESeq", "plt.plot([int(start), int(end)], [6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy", "lefts = [] height_merged += [np.mean(heights)] heights = [] heights", "not equal to 0, than merge elements will be averaged", "math def plotGenes(genes_bed, exons_bed, introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None,", "not to be plotted, default to None. :type blacklist: set,", "values as heatmap :param methylation_bed: Methylation calls. Following fields must", "the gene id mapping. 
:rtype: dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename,", "None): '''Function for plotting methylation values as heatmap :param methylation_bed:", "if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect = Rectangle((current_start, .5), current_end-current_start, 1,", "Start position of the genomic region. :type start: int :param", "optional :return: Nothing to be returned. :rtype: None ''' #", "= color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor,", "of copy number gains, defaults to \"g\". :type color_gain: str,", "Deviation from ploidy, 5. True Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool`", "1 continue if(counter < len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos =", "chrom=\"X\" elif(chrom == \"24\"): chrom = \"Y\" cnv_bed_list += [", "int :param color: Color of lines equalizing distances, defaults to", "= motif_end color=color_plus dx = head_length if(strand == \"-\"): arrow_start", "Chromosome of region to be plotted. :type r_chrom: str :param", "plotted. :type chrom: str :param start: Start position of region", ":param r_end: End position of region to be plotted. :type", "split_line[0] if(chrom == \"23\"): chrom=\"X\" elif(chrom == \"24\"): chrom =", "ax=None, color=\"b\", offset=None, merge=None): '''Function that plots bedGraph like iterators.", "in blacklist): counter += 1 continue n_groups = len(groups) for", "ID of g2 used for legend plotting, defaults to \"normal\".", "must be the same as the number of groups, defaults", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True, a", "GENE SYMBOLs will be shown, else the GENE SYMBOLs are", "Define Colormap cmap = cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) 
m", "[gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border =", "region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)): region_mid_point = region_mid_points[i]", "the segments for which contacts were called. :type segment_size: int", "color_reverse border_distance_down = region_border_down-start if(start < region_border_up): start = region_border_up", ":param segment_size: Size of the segments for which contacts were", "optional :param ax: Axis on which to plot contact map,", "defaults to \"g\". :type color_gain: str, optional :param color_loss: Plot", "color=\"k\", ax=None): '''Function that plots methylation values as dot plots.", "g1 samples expression, defaults to \"#fb8072\". :type color_g1: str, optional", "plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed,", "y position \\ of gene. :rtype: tuple ''' sort_indices =", "4.5]) plt.yticks([0, 1, 2, 3, 4], [\"0\", \"1\", \"2\", \"3\",", "start, end, segment_size, cmap=\"Greys\", vmin=None, vmax=None, location=\"top\", ax=None): '''Function that", "[] met_forward = False met_reverse = False # Plot Introns", ":param gene_name_mapping_file: Path to a tab separated file, for which", "Color code for minus stranded genes, default is \"#fb8072\". :type", ":class:`pandas.Dataframe` containing the expression values of g1 samples (columns: sample", ":class:`pybedtools.BedTool` object containing the one region, for which the gene", "1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not", "plotted. :type end: int :param head_width: Width of the arrow", "plotting, defaults to \"tumor\". 
:type g1_id: str, optional :param g2_id:", "if(len(gene_mid_points) <= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small", "optional. :return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where 1. max_y_pos+1.5", "retaining the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing", "color: Color of the genomic scales elements, defaults to \"k\".", "head as proportion of the arrow, defaults to 0.2 :type", "region to be plotted. :type end: int :param color: Color", "[(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1])", "TX end of genes. :type genes_bed: :class:`pybedtools.BedTool` :param exons_bed: :class:`pybedtools.BedTool`", "counter=0 patch_saved = False for gene_name in gene_names: left_border =", "= link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point = link_pos2", "1000. :type bin_size: int, optional :param ax: Axis to be", "as ticker from matplotlib.patches import Rectangle from matplotlib.patches import Arrow", "CNVs. :type chromosome: str :param start: Start position on chromosome.", "legend plotting, defaults to None. Number of ids must be", "plt.yticks([0, 2, 4, 6], [\"0\", \"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45)", "plt.ylim([0.3, 0.7]) def plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax", "to be plotted. :type genes_bed: :class:`pybedtools.BedTool` :param region_size: Size of", "> end): continue n_meth = int(element[3]) n_unmeth = int(element[4]) current_bin", "abs(link_pos2-link_pos1) if(distance > max_dist): max_dist = distance mid_point = link_pos1", "# Plot Introns for i in introns_in_region: start = int(i[1])", "stranded genes, default is \"#fb8072\". :type color_minus: str, optional. 
:return:", "ax.add_patch(patch) ax.set_xlim(start, end) if(location == \"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2.,", "of all samples (columns: sample ids; index: gene ids). :type", ":type rotation: int, optional :return: Nothing to be returned. :rtype:", "% merge == 0): left_merged += [lefts[0]] lefts = []", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of equalized region midpoints.", "in str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3]) if(segment_type == \"R\"): color", "gene ids and values = y position \\ of gene.", "Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c += 1", "for i in range(n): if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing +=", "plt.plot([int(start), int(end)], [2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3,", "start: Start position of the genomic region. :type start: int", "\"#80b1d3\". :type color_plus: str, optional. :param color_minus: Color code for", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned. :rtype:", "point per expression value is plotted in addition to the", "(equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path = Path(vertices, codes) path_patch =", "if(not merge is None): heights = [] lefts = []", "plotting gene structures, i.e. introns exons of genes. :param genes_bed:", "for i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops,", "region_border_down-start if(start < region_border_up): start = region_border_up border_distance_down = region_border_down-start", "start or position > end): continue n_meth = int(element[3]) n_unmeth", "2. Start position 3. end position 4. 
Number methylated cytosines", "List of colors used for plotting samples expression. The number", "= None if(scale == \"Mb\"): digits_to_round = int(6-np.log10(tick_size)) divisor =", "y_level_dict[i] += [[gene_start, gene_end]] break elif(i == max_y_pos): max_y_pos +=", "for plotting HiC intensities, defaults to \"Greys\". :type cmap: str,", "plotting strand = str(i[5]) color = color_forward if(strand == \"-\"):", "of region to be plotted. :type end: int :param bin_size:", "be plotted. :type chrom: str :param start: Start position of", "start, end, ax = None): '''Function for plotting genomix segments", "optional :param color_minus: Color of plus stranded TF regions, defaults", "value = float(signal[3]) if(value > max_signal): max_signal = value if(not", "plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return", "to be plotted in base pairs. :type region_size: int :param", "g2 samples (columns: sample ids; index: gene ids) :type expression_df_g2:", "samples expression, defaults to \"#80b1d3\". :type color_g2: str, optional :param", "for i in region]), from_string=True) # Get gene names and", "str :param start: Start position on chromosome. :type start: int", "returned :rtype: None ''' ax = ax if ax is", "float, optional :param color_gain: Plot color of copy number gains,", "linea fashion. :param chrom: Chromosome of the region to be", "if(strand == \"-\"): arrow_start = motif_end arrow_end = motif_start color", "containing exons of genes. 
:type exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool`", "rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i],", "[]) ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r,", "expression_df_g1: :class:`pandas.Dataframe` containing the expression values of g1 samples (columns:", ":class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the one region, for", "plotted, defaults to 0. :type merge: int, optional :return: Nothing", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of equalized region midpoints. :rtype: list", "\"lower left\", \"lower right\", \"upper left\", \"upper right\", default is", "= exp_values if(log_transformed): expression_values = np.log2([i if i >= 1.", ":type direction: str, optional. 
:param ax: Axis on which to", "deviation from ploidy to be considered as a CNV, defaults", "for i in genes_bed])] genes_sorted_bed = [genes_bed[i] for i in", "Alpha value of the rectangle, representing the region to be", "optional :param alpha: Alpha value of the rectangle, representing the", "if location == \"bottom\" the pyramid points downwards, defaults to", "plt.gca() # Get gene names and regions genes_in_region_bed = genes_bed.intersect(region_bed,", "import Rectangle from matplotlib.patches import Arrow from matplotlib.path import Path", "i in range(max_y_pos+1): if(i == 0 and not max_y_pos in", "tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([], [])", "(region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path = Path(vertices, codes)", "= Rectangle((start, y-.03), end-start, .06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand", "methylation values per bin # Define Colormap cmap = cm.bwr", "str :return: Dictionary containing the gene id mapping. :rtype: dictionary", "first_tick = start+(tick_size-start%tick_size) ticks = [] current_tick = first_tick while(current_tick", "which to plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "the genes will be stacked. :type distance_ratio: float :return: Tuple", "3. end position 4. Beta Value :type meth_calles: iterator :param", "rectangles. 
:param regions: Iterator containig list-like elements with the following", "[(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path =", "[] height_merged = [] if(not merge is None): heights =", "(region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)] else: codes = [Path.MOVETO,", "def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False,", ":param color_loss: Plot color of copy number losses, defaults to", "= plt.cm.get_cmap(cmap) if(max_dev is None): max_dev = max([abs(float(i[3])) for i", "midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,", "the genes are plotted, default is None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", "use log transformed values for plotting, non-transformed values otherwise. :type", "optional :param location: Either of \"top\" | \"bottom\". If location", "plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev is None): max_dev = max([abs(float(i[3]))", "== max_y_pos): max_y_pos += 1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] =", "y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color = color)", ":param loc_coordinates: Either of \"up\" | \"down\". If \"up\", plot", "start_r: Start position of the region to be plotted. :type", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5)", "of the region to be plotted. :type chrom: str :param", "import Arrow from matplotlib.path import Path from matplotlib.patches import PathPatch", "float, optional :return: Plots axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors =", "of plot, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "this ratio is underwent, the genes will be stacked, default", "defaults to False. :type plot_legend: bool :param color_g1: Color used", "for gene plotting via function plotGenes. :param genes_bed: :class:`pybedtools.BedTool` object", "plot_points: bool, optional :param alpha: Alpha value for the background", "to 0. :type rotation: int, optional :return: Nothing to be", "patch_list += [patch] patch_description_list += [\"forward strand\"] met_forward = True", "ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def plotGenomicSegments(segments_list, chrom,", "overhang: float, optional :param color_plus: Color of plus stranded TF", "region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\",", "\"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \"", "TXend of genes. :type genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object", "flierprops = {\"color\": \"k\"} medianprops = {\"color\": \"k\", \"linewidth\": .3}", "number of stacked genes. 2. patch_list is the list of", "plotting methylation values as heatmap :param methylation_bed: Methylation calls. Following", "intensity values of HiC contacts. :type contact_map: :class:`pandas.DataFrame` :param start:", "end position 4. Number methylated cytosines 5. Number unmethylated cytosines", "gene_name = str(i[3]) if(not blacklist is None and gene_map[gene_name] in", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True legend is plotted,", "ax: (default: None) Axis used for plotting, defaults to None.", "\"k\". 
:type color: str, optional. :param ax: Axis where the", "cytosines 5. Number unmethylated cytosines Or 1. Chromsome 2. Start", "'''Function for plotting CNV segments :param cnvs_bed: :class:`pybedtools.BedTool` object containing", "Tuple of 1. max_y_pos: Defines the number of stacked genes.", "markersize=1, alpha=.5) elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2. for m in", "vertices = [(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point, .2), (equalized_region_mid_point, 0)]", "plot_gene_ids: If True, all gene ids will be included in", "names, for genes that should not be shown on the", "[(link_pos1, 0), (mid_point, distance), (link_pos2, 0)] codes = [Path.MOVETO, Path.CURVE3,", "+ 3*(extension/4.) tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name]", "list, optional :param plot_gene_ids: If True, all gene ids will", "merge: int, optional :return: Nothing to be returned. :rtype: None", "n_meth = int(element[3]) n_unmeth = int(element[4]) current_bin = int((position-start)/bin_size) counter", "region to be plotted. :type start_r: int :param end_r: Chromosomal", "stacked genes, default is None. :type y_max: bool, optional :param", "4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy == 4): plt.plot([int(start), int(end)], [1,", "gene names and regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names", "], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.) else 0. for", "If True, a legend describing plus or minus stranded genes", "If this ratio is underwent, the genes will be stacked,", "optional :param distance_ratio: Minimal distance between two genes, as ratio", "[] counter=0 patch_saved = False for gene_name in gene_names: left_border", "return gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None,", "values otherwise. 
:type log_transformed: bool, optional :param plot_points: If True,", "== \"left\"): TX_start = start_r TX_end = TX_pos rect =", "GENE SYMBOLs. :type gene_names_map: dict. :param blacklist: Set containing gene", "g_id in patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list += [g_id] counter", "strings, defaults to 0. :type rotation: int, optional :return: Nothing", "color = color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend):", "for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick in ax.get_yticklabels():", "position first_tick = start+(tick_size-start%tick_size) ticks = [] current_tick = first_tick", "be plotted. :type end: int :param segment_size: Size of the", "= ax if ax is not None else plt.gca() max_signal", ".5, dx, 0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True)", ":param end: End position of the genomic region. :type end:", "e in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None,", "'''Function that creates a mapping between gene ids :param gene_name_mapping_file:", ":rtype: list ''' ax = ax if ax is not", "None): vmax = np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap) for i", "range(n): if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before", "in genes_sorted_bed: gene_name = interval[3] gene_start = int(interval[1]) gene_end =", "Start position 3. end position 4. 
Beta Value :type meth_calles:", "location == \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in y_range:", "[((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points = [] for e in genomic_segments:", "[] if(direction == \"top_down\"): codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO]", "translocation event as a bar, showing the part of the", "(i-.5)* ((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\",", "default is \"lower right\". :type legend_loc: str, optional :param color_plus:", ":return: List of equalized region midpoints. :rtype: list ''' ax", ":param ax: Axis of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param", "be the same as the number of groups. :type ids:", "per expression value is plotted in addition to the boxplot,", "left') return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df, groups, gene_names_map=None,", "color). The color field is used to determine the color", "gene_name_label, size=5, color = color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([],", "interval[3] gene_start = int(interval[1]) gene_end = int(interval[2]) for i in", "the region to be plotted. :type end_r: int :param color:", "\"b\". :type color: str, optional :param offset: Length of intervals,", "included in the plot, False otherwise, default is True :type", "head_length: Length of the arrow in bp (depends on the", "genes_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the region to", "'''Functions that plots genomic regions as simple rectangles. 
:param regions:", "len(genes_in_region) == 0): # Determine y positions of genes for", "matplotlib import tabix import math def plotGenes(genes_bed, exons_bed, introns_bed, region_bed,", "optional :param plot_points: If True, a point per expression value", "dot plots. :param meth_calls: Iterator containing list-like elements with the", ":class:`pybedtools.BedTool` object containing TXstart, and TXend of genes. :type genes_bed:", ":param motifs_bed: :class:`pybedtools.BedTool` object containing regions of the TF sited", "to be plotted. :type end: int :param color: Color of", "end: int :param color: Color of lines equalizing distances, defaults", "\"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7,", ":type end: int :param ploidy: Assumed ploidy of tumor, defaults", "i in binned_meth_calls ] binned_average_meth_no_missing = [] n = len(binned_average_meth)", "equalization (top_down | bottom_up), defaults to \"top_down\". :type direction: str,", "that is translocated. :param chrom_r: Chromosome of the region to", "merge*offset left = left_merged height = height_merged plt.bar(left, height, offset,", "2. y_pos_dict: Dictionary with keys = gene ids and values", "expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\",", "== \"24\"): chrom = \"Y\" cnv_bed_list += [ [chrom, split_line[1],", "str :param ax: Axis used for plotting, defaults to None.", "end_r, TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function that plots a translocation", "and normal) on a gene region scale equalizing the position", "of groups. :type ids: list, optional. :param plot_gene_names: True if", "0. 
:type rotation: int, optional :return: Nothing to be returned.", "not(met_forward)): patch_list += [patch] patch_description_list += [\"forward strand\"] met_forward =", "number of colors must be the same as the number", "is not None else plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2 =", "otherwise, defaults to True. :type plot_gene_names: bool, optional :param position_gene_names:", "matplotlib.pyplot as plt import pybedtools import pandas as pnd import", ":param head_length: Length of the arrow in bp (depends on", "= int(i[1]) end = int(i[2]) gene_name = str(i[3]) if(not blacklist", "each element is a list-ike object containing: 1. Chromosome 2.", ":type start_r: int :param end_r: Chromosomal end positiont of the", "object containing TX start, and TX end of genes. :type", "ax.add_patch(rect) else: rect = Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color, edgecolor='none',", "like iterators. :param chip_signals: Iterator for which each element is", "= color, edgecolor = color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom,", "int(i[1]) gene_name = str(i[3]) if(not blacklist is None and gene_map[gene_name]", "for m in meth_calls ], [ float(m[4]) for m in", "is None and gene_name in blacklist): counter += 1 continue", "meth_calles: iterator :param chrom: Chromosome of region to be plotted.", "def plotGenes(genes_bed, exons_bed, introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1,", "\"#e5c494\", \"#bbbbbb\"] ax = ax if ax is not None", "None and gene_name in blacklist): counter += 1 continue if(counter", "to \"tumor\". :type g1_id: str, optional :param g2_id: ID of", "\"up\". 
:type loc_coordinates: str, optional :param revert_coordinates: If True, coordinates", "overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def", "links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1)", "ax is not None else plt.gca() # Calculate midpoints of", "'''Function for plotting grouped gene expression (e.g. tumor and normal)", "be shown, else the GENE SYMBOLs are hidden. :type plot_gene_names:", "fields must be included: Chrom, Start, End, Methylated Cs, Unmethylated", "contact map, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "in increasing order, defaults to False. :type revert_coordinates: bool, optional", "[genes_bed[i] for i in sort_indices] y_pos_dict = {} y_level_dict =", "[ str(round(i/float(divisor), digits_to_round))+scale for i in ticks ] if(loc_coordinates ==", "fontsize=5, loc='lower left') return ax def plotGenomicSegments(segments_list, chrom, start, end,", "patch_artist = True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1)", "gene_names += [gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens] gene_regions += [[int(e[1]),", "1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth = [", "optional :param cnv_threshold: Minimal deviation from ploidy to be considered", "str :param start_r: Chromosomal start position of the region to", "Axis to be used for plotting, defaults to None. :type", "of lists containing the IDs of the different groups. 
:type", "plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function for plotting grouped gene", "readACESeqAsBed(input_filename): '''Function that reads CNVs from ACESeq (\"*most_important*\") files and", "genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TXstart, and TXend of", "if(strand == \"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch", "exp_values = list(exp_values.iloc[0, :]) expression_values = exp_values if(log_transformed): expression_values =", "range(int(((end-start)/bin_size)+1)) ] counter = 0 for element in methylation_bed: #", "for signal in chip_signals: start = int(signal[1]) end = int(signal[2])", "Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) #", "log_transformed: If True use log transformed values for plotting, non-transformed", "head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6])", "optional :param vmax: Maximal value of intensity range to be", "segments_list: segment_start = int(segment[1]) segment_end = int(segment[2]) color = tuple([", "= str(e[3]) if(not gene_names_map is None): gene_names += [gene_names_map[gene_name_ens]] else:", "iterator :param chrom: Chromosome of region to be plotted. 
:type", "arcs from unequal distances of genomic segments to equal distances.", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [2, 2], color=color_threshold, linestyle=\"--\", linewidth=.5)", "color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for plotting paired gene expression", "distance mid_point = link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point", "split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return gene_map", "between gene ids :param gene_name_mapping_file: Path to a tab separated", "None else plt.gca() genes_in_region = genes_bed exons_in_region = exons_bed introns_in_region", "def plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function that", "than cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn = ploidy color =", "transformed values for plotting, non-transformed values otherwise. :type log_transformed: bool,", "color=\"b\", offset=None, merge=None): '''Function that plots bedGraph like iterators. :param", ":type end: int :param head_width: Width of the arrow head", "verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\",", "Beta Value :type meth_calles: iterator :param chrom: Chromosome of region", "0], linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start, end], [0.3, 0.3], linestyle=\"-\",", "Chrom, Start, End, Methylated Cs, Unmethylated Cs. :type methylation_bed: :class:`pybedtools.BedTool`", ":param chip_signals: Iterator for which each element is a list-ike", "r_start: int :param r_end: End position of region to be", "end: int :param color: Color of the genomic scales elements,", "ploidy of tumor, defaults to 2. 
:type ploidy: int, optional", "== \"up\"): plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i],", "0 for interval in genes_sorted_bed: gene_name = interval[3] gene_start =", "None): heights = [] lefts = [] for i in", "if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 = list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0,", "= colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end)", "tick position first_tick = start+(tick_size-start%tick_size) ticks = [] current_tick =", "if(i % merge == 0 and not (i == 0)):", "HiC intensities, defaults to \"Greys\". :type cmap: str, optional :param", "None and gene_names[i] in blacklist): continue left_border = gene_regions[i][0] right_border", "== \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for j in y_range: #", "of the arrow head as proportion of the arrow, defaults", "the translocation, defaults to \"k\". :type color: str, optional :param", "of plus stranded TF regions, defaults to \"#80b1d3\". :type color_plus:", "[[gene_start, gene_end]] break elif(i == max_y_pos): max_y_pos += 1 y_pos_dict[gene_name]", "= int(region_bed[0][1]) # Determine minimal extension of barplot extension=None if(len(gene_mid_points)", "arrow, defaults to 0.2 :type head_width: float, optional :param head_length:", "which to plot contact map, defaults to None. 
:type ax:", "1, 2, 3, 4], [\"0\", \"1\", \"2\", \"3\", \"4\"], size=6)", "else: scale=\"Kb\" digits_to_round = None divisor = None if(scale ==", "color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha)", "in [meth_before, meth_after] if not j == \"NA\" ] binned_average_meth_no_missing", "if(segment_type == \"R\"): color = (1,1,1,1) rect = Rectangle((segment_start, 0),", "in regions: if(not edgecolor): current_color = color rect = Rectangle([int(region[1]),", "max y position for gene plotting via function plotGenes. :param", "= open(input_filename, \"r\") cnv_bed_list = [] ploidy = None for", ":type r_start: int :param r_end: End position of region to", "segment_size, cmap=\"Greys\", vmin=None, vmax=None, location=\"top\", ax=None): '''Function that plots HiC", "else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)): region_mid_point =", "= int(interval[2]) for i in range(max_y_pos+1): if(i == 0 and", "position of the region to be plotted. :type end: int", "end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax = None): '''Functions that plots", "ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for plotting CNV", "ax: Axes instance on which the genes are plotted, default", "defaults to False. :type plot_points: bool, optional :param alpha: Alpha", "for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be", "genes is plotted, False otherwise. Default is False. :type plot_legend:", "value is plotted in addition to the boxplot, no points", "4): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[4])", "= len(binned_average_meth) for i in range(n): if(not binned_average_meth[i] == \"NA\"):", "defaults to 1. :type alpha: float, optional. 
:param ax: Axis", "c = 0 for region in regions: if(not edgecolor): current_color", "patch_description_list += [g_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names ==", "a gene region scale equalizing the position of genes. :param", "integer values containing center positions of genes. :type gene_mid_points: list", "region to be plotted. :type start: str :param end: End", ":type expression_df: class:`pandas.DataFrame` :param blacklist: Set containing gene ids not", "= split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol gene_name_mapping_file.close() return", "cnvs_bed]) for interval in cnvs_bed: current_start = int(interval[1]) current_end =", "= Rectangle((current_start, .5), current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect)", "PathPatch import matplotlib.cm as cm import matplotlib import tabix import", "left_border = gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not blacklist is None", "keys: ENSEMBL GENE IDs, and values: HUGO GENE SYMBOLs. :type", "optional :param color: Color of the bar representing the translocation,", "\"up\" | \"down\". 
If \"up\", plot ticks to upper direction,", "right_border = gene_regions[i+1][0] else: right_border = region_right_border current_extension = right_border-left_border", "max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]] break else: continue return max_y_pos,", "as heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following", "0.2 :type head_width: float, optional :param head_length: Length of the", "ids[g] else: g_id = \"group \"+str(g) if(not g_id in patch_description_list):", "None): g_id = ids[g] else: g_id = \"group \"+str(g) if(not", "separated file, for which the first column is a ensemble", "Exons for i in exons_in_region: start = int(i[1]) end =", "[meth_before, meth_after] if not j == \"NA\" ] binned_average_meth_no_missing +=", "medianprops = {\"color\": \"k\", \"linewidth\": .3} whiskerprops = {\"color\": \"k\",", "== 2): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", ":param plot_legend: If True, a legend describing plus or minus", ":class:`pandas.DataFrame` :param start: Chromosomal start position of region to be", "gene_names_map is None): gene_names += [gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens]", "position > end): continue n_meth = int(element[3]) n_unmeth = int(element[4])", "for plotting methylation values as heatmap :param methylation_bed: Methylation calls.", "plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function", "for i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id", "plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2,", "optional :param ax: Axis of plot, defaults to None. 
:type", "int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function", "is None and gene_map[gene_name] in blacklist): continue # Define color", "alpha: Alpha value for the background color of the boxplots", "len(genomic_segments) equalized_region_size = (end-start) if(n_segments > 0): equalized_region_size=(end-start)/n_segments equalized_region_mid_points =", ":type color_loss: str, optional :param color_neutral: Plot color of copy", ":type region: list :param groups: List of lists containing the", "(depends on the region that is plotted), defaults to 1000.", "bp (depends on the region that is plotted), defaults to", ":type exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object containing introns :type", "Determine bin position = int(element[1]) if(position < start or position", "\"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\", \"#bbbbbb\"] ax = ax if ax", "plots a translocation event as a bar, showing the part", "tcn = float(interval[4]) # Smooth tcn, if ploidy_dev is smaller", "plotted. :type chrom_r: str :param start_r: Start position of the", "\"linewidth\": .3} flierprops = {\"color\": \"k\"} medianprops = {\"color\": \"k\",", "color = color_minus dx = -1.*head_length plt.arrow(arrow_start, .5, dx, 0,", "optional :param loc_coordinates: Either of \"up\" | \"down\". If \"up\",", "= current_tick + tick_size scale = None if(first_tick > 1000000):", "optional :param upper: If True, make less ticks, else if", "dx = head_length if(strand == \"-\"): arrow_start = motif_end arrow_end", "as ratio of ax width, such that two genes are", "overhang: Fraction that the arrow is swept back (0 overhang", "+= [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points = [] for e in", "ax: Axis where the plot is drawn, defaults to None.", "not None else plt.gca() c = 0 for region in", "of 1. max_y_pos: Defines the number of stacked genes. 2.", "None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If True, make", "of the region to be plotted. :type end: str :param", "ax: Axis on which to plot contact map, defaults to", "segments to equal distances. :param genomic_segments: List of segments for", "meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) elif(n_entries == 4): plt.plot([", "1.5. max_y_pos defines the \\ number of stacked genes. 2.", "str :param r_start: Start position of region to be plotted.", "is not None else plt.gca() # Get gene names and", ":param vmin: Minimal value of intensity range to be plotted,", "linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color,", "linewidth=.5) plt.plot([int(start), int(end)], [3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "input_filename: Full path to ACESeq \"most_important\" file :type input_filename: str", "10**int((np.log10((end-start)/10))) # Determine first tick position first_tick = start+(tick_size-start%tick_size) ticks", "blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\",", "color_minus: str, optional :param ax: Axis on which to plot", "[start] height += [value] left_merged = [] height_merged = []", ":param ax: (default: None) Axis used for plotting, defaults to", "elif(i == max_y_pos): max_y_pos += 1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos]", "vmax else 1.) facecolor = colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor,", "plt.xlim([start, end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000,", "values containing center positions of genes. 
:type gene_mid_points: list :param", "False for gene_name in gene_names: left_border = gene_regions[counter][0] right_border =", ":param gene_mid_points: list of integer values containing center positions of", "[ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions,", "Plot average methylation values per bin # Define Colormap cmap", "bplot_g2 = ax.boxplot([np.log2([i if i >= 1. else 1. for", "to 0.5. :type alpha: float, optional :return: Plots axis. :rtype:", "(\"*most_important*\") files and converts them to pybedtools.BedTool object :param input_filename:", "meth_before = (binned_average_meth[i-1] if not i == 0 else \"NA\")", "= int(element[4]) current_bin = int((position-start)/bin_size) counter += 1 binned_meth_calls[current_bin][0] +=", "current_tick = current_tick + tick_size scale = None if(first_tick >", "that contains bed-like structured lists with the following elements: 1.", "max_y_pos: Defines the number of stacked genes. 2. y_pos_dict: Dictionary", "5. Start region2 6. End region2 :type links_bed: iterator :param", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", "capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions, start,", "size of bin to average methylation values, defaults to 1000.", "chromosome: Chromosome for which to plot CNVs. 
:type chromosome: str", "as pyramid plots :param contact_map: Matrix that contains the intensity", "optional :param color_loss: Plot color of copy number losses, defaults", "int(motif[1]) motif_end = int(motif[2]) strand = str(motif[3]) arrow_start = motif_start", "# Determine y positions of genes for plotting max_y_pos, y_pos_dict", "None divisor = None if(scale == \"Mb\"): digits_to_round = int(6-np.log10(tick_size))", "gene_names_clean += [gene_name] exp_values = expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"):", "tick_positions = [] gene_names_clean = [] counter=0 patch_saved = False", "Rectangle((current_start, .5), current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start),", "ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names):", ":class:`pybedtools.BedTool` ''' input_file = open(input_filename, \"r\") cnv_bed_list = [] ploidy", "the \\ number of stacked genes. 2. patch_list is the", "equalizing distances, defaults to \"k\". :type color: str, optional :param", "location: str, optional :param ax: Axis on which to plot", "position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing gene regions.", "gene names shall be plotted, False otherwise, defaults to True.", "groups, defaults to None. :type colors: str, optional :param ids:", "+= n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1])) if", "\"Y\" cnv_bed_list += [ [chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5], \"+\"]", "plt.ylim([0, 1]) return equalized_region_mid_points def plotCoordinates(chrom, start, end, color=\"k\", ax", "to None. 
:type offset: int, optional :param merge: Number of", "Plot Introns for i in introns_in_region: start = int(i[1]) end", "region_size: Size of region to be plotted in base pairs.", "ax if ax is not None else plt.gca() colors =", "g1_id: str, optional :param g2_id: ID of g2 used for", "in patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list += [g_id] counter +=", "= None): '''Function for plotting methylation values as heatmap :param", "None else plt.gca() # Get gene names and regions genes_in_region_bed", "189./255., 0.5) if(ploidy == 2): plt.plot([int(start), int(end)], [1, 1], color=color_threshold,", "\"bwr\". :type cmap: str, optional :param max_dev: Maximal deviation from", "chrom_r: Chromosome of the region to be plotted. :type chrom_r:", "of stacked genes. 2. patch_list is the list of patches", "to be returned :rtype: None ''' ax = ax if", "rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0],", "(range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location == \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i))) for", "= \"Mb\" else: scale=\"Kb\" digits_to_round = None divisor = None", "region. :type start: int :param end: End position of the", "gene ids not to be plotted, defaults to None, :type", "ticks, else if False make more ticks. :type upper: bool,", "ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([], []) plt.xlim([start_r, end_r]) plt.ylim([0, max_dist/2])", "as arrows, indicating their directionality. :param motifs_bed: :class:`pybedtools.BedTool` object containing", "color: str, optional :param ax: Axis of plot, defaults to", "midpoints. :rtype: list ''' ax = ax if ax is", "gene regions. 
:type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points: list of integer", "y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not y_max is None):", "fontsize=5, color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates ==", "ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function for plotting grouped", "genes that should not be shown on the plot, default", "be averaged an plotted, defaults to 0. :type merge: int,", "ax if ax is not None else plt.gca() binned_meth_calls =", "genome that is translocated. :param chrom_r: Chromosome of the region", "values: HUGO GENE SYMBOLs. :type gene_names_map: dict. :param expression_df: class:`pandas.DataFrame`", "are hidden. :type plot_gene_names: bool. :return: Axis on which plot", "plt.ylim(0, 1) plt.yticks([], []) return patches_dict def plotCNVs(cnvs_bed, chromosome, start,", "elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],", "the plot, default is None :type blacklist: list, optional :param", "str(segment[3]) if(segment_type == \"R\"): color = (1,1,1,1) rect = Rectangle((segment_start,", "region midpoints. :rtype: list ''' ax = ax if ax", "[\"forward strand\"] met_forward = True elif(strand == \"-\" and not(met_reverse)):", "plt.xlim([int(start), int(end)]) if(ploidy == 2): plt.ylim([0, 4.5]) plt.yticks([0, 1, 2,", "+ extension/2.] 
gene_names_clean += [gene_name] exp_values_g1 = expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__", "value of the rectangle, representing the region to be plotted,", "ids) :type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary with keys: ENSEMBL", "genomix segments in different colors :param segments_tabix_filename: Path to tabixed", "= start_r TX_end = TX_pos rect = Rectangle((TX_start, .4), TX_end-TX_start,", "Chromosome, 2. Start Position, 3. End Position, 4. Deviation from", "region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing the expression values of", "for plotting CNVs, defaults to \"bwr\". :type cmap: str, optional", "gene ids) :type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing the", "+= [lefts[0]] lefts = [] height_merged += [np.mean(heights)] heights =", "position on chromosome. :type end: int :param ploidy: Assumed ploidy", "Length of intervals, defaults to None. :type offset: int, optional", "in range(n_groups): bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy == 2): plt.ylim([0, 4.5])", "is not None else plt.gca() for motif in motifs_bed: motif_start", "Axis on which to plot contact map, defaults to None.", "stacked genes. 2. y_pos_dict: Dictionary with keys = gene ids", "cnv_threshold: Minimal deviation from ploidy to be considered as a", "tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i], ticks[i]], [.3,", "values = y position \\ of gene. :rtype: tuple '''", "str :return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq :rtype: :class:`pybedtools.BedTool`", "regions to be plotted, defaults to \"#cbebc4\". 
:type color: str,", "is None): max_y_pos = y_max # Plot Exons for i", "if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\", color=color, linewidth=1)", "None :type vmin: float, optional :param vmax: Maximal value of", "y = max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2), end-start, .4, color=color,", "= region_border_down-start if(start < region_border_up): start = region_border_up border_distance_down =", "4], [\"0\", \"1\", \"2\", \"3\", \"4\"], size=6) elif(ploidy == 4):", "= list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name,", "is translocated. :param chrom_r: Chromosome of the region to be", "e in genes_in_region_bed: gene_name_ens = str(e[3]) if(not gene_names_map is None):", "plotted, defaults to False. :type edge_color: str, optional :param alpha:", "[np.mean(heights)] heights = [] heights += [height[i]] lefts += [left[i]]", "not None else plt.gca() n_entries = len(meth_calls[0]) if(n_entries == 5):", "end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i", "in range(1, n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)- equalized_region_size/2)] region_mid_points =", "color: Color of the rectangles representing the regions to be", ":type genomic_segments: list :param start: Start position of the genomic", "region to be plotted. :type end_r: int :param color: Color", "= PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0,", "ax width, such that two genes are plotted side by", "= [] counter=0 patch_saved = False for gene_name in gene_names:", "Rotational angle of coordinate strings, defaults to 0. 
:type rotation:", "\"k\"} medianprops = {\"color\": \"k\", \"linewidth\": .3} whiskerprops = {\"color\":", ":type vmax: float, optional :param location: Either of \"top\" |", "Iterator for which each element is a list-ike object containing:", "False. :type revert_coordinates: bool, optional :param rotation: Rotational angle of", "left += [start] height += [value] left_merged = [] height_merged", "end]) plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom,", "the genome that is translocated. :param chrom_r: Chromosome of the", "= [genes_bed[i] for i in sort_indices] y_pos_dict = {} y_level_dict", "chip_signals: start = int(signal[1]) end = int(signal[2]) value = float(signal[3])", "and gene_name in blacklist): counter += 1 continue if(counter <", "Full path to ACESeq \"most_important\" file :type input_filename: str :return:", "blacklist: List of gene names, for genes that should not", "of segments, and values patch :rtype: dict ''' ax =", "genomic region. :type end: int :param color: Color of lines", "-1.*max_dev): tcn = -1.*max_dev elif(tcn > max_dev): tcn = max_dev", "False # Plot Introns for i in introns_in_region: start =", "= start+(tick_size-start%tick_size) ticks = [] current_tick = first_tick while(current_tick <=", "for line in input_file: if(line[:7] == \"#ploidy\"): ploidy = float(line.rstrip().split(\":\")[1])", "max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5,", "and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i] += [[gene_start,", ".3} patch_list = [] patch_description_list = [] tick_positions = []", "genomic region. :type start: int :param end: End position of", "Methylation calls. 
Following fields must be included: Chrom, Start, End,", "str(i[3]) if(not blacklist is None and gene_map[gene_name] in blacklist): continue", "plt.gca() TX_start = TX_pos TX_end = end_r if(direction == \"left\"):", "genomic_segments: if(int(e[1]) < start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) > end):", "edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot thresholds color_threshold=(189./255., 189./255., 189./255.,", "to 0. :type overhang: float, optional :param color_plus: Color of", "cnv_threshold: float, optional :param cmap: Colormap used for plotting CNVs,", "str, optional :param g1_id: ID of g1 used for legend", "else 1.) facecolor = colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none')", "Colormap cmap = cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) m =", "c += 1 else: current_color = color rect = Rectangle([int(region[1]),", "default is \"#fb8072\". :type color_minus: str, optional. :return: Tuple of", "j in y_range: # Define midpoint of rectangle midpoint =", ":type merge: int, optional :return: Nothing to be returned. :rtype:", "plotted. :type start: int :param end: End position of the", "None and gene_map[gene_name] in blacklist): continue # Define color for", "start: str :param end: End position of the region to", "right_border = None if(i < len(gene_names)-1): right_border = gene_regions[i+1][0] else:", "values of HiC contacts. 
:type contact_map: :class:`pandas.DataFrame` :param start: Chromosomal", "int(end)) plt.ylim(0, 1) plt.yticks([], []) return patches_dict def plotCNVs(cnvs_bed, chromosome,", "0, than merge elements will be averaged an plotted, defaults", "[[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) # Determine", "cnv_threshold): rect = Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt',", "gene id mapping. :rtype: dictionary ''' gene_name_mapping_file = open(gene_name_mapping_filename, \"r\")", "n_groups = len(groups) for g in range(n_groups): bplot_pos = left_border", "= int(segment[1]) segment_end = int(segment[2]) color = tuple([ float(i)/256. for", "== \"NA\" ] binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list))) if len(average_list)", ".3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5,", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If True, make less", "end_r: int :param TX_pos: Position of the translocation. :type TX_pos:", "motif_end color=color_plus dx = head_length if(strand == \"-\"): arrow_start =", "pyramid points downwards, defaults to top, :type location: str, optional", "10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size = 10**int((np.log10((end-start)/10))) # Determine first tick", "region to be plotted. :type end: int :param head_width: Width", "-.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c += 1 else:", "scale equalizing the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object", "optional :param rotation: Rotational angle of coordinate strings, defaults to", "order. 
Else, coordinates stay in increasing order, defaults to False.", "boxprops = {\"color\": \"k\", \"linewidth\": .3} flierprops = {\"color\": \"k\"}", "via function plotGenes. :param genes_bed: :class:`pybedtools.BedTool` object containing genes to", "ids) :type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing the expression", "be the same as the number of groups, defaults to", "plt.xticks([], []) plt.xlim([start, end]) def plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\",", ":class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing the expression values of g2", "structures, i.e. introns exons of genes. :param genes_bed: :class:`pybedtools.BedTool` object", "a linea fashion. :param chrom: Chromosome of the region to", "edgecolor = color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start, end,", "plt.xlim([start, end]) plt.ylim([0, 1]) return equalized_region_mid_points def plotCoordinates(chrom, start, end,", "else: right_border = region_right_border current_extension = right_border-left_border if(current_extension == 0.):", "segments in different colors :param segments_tabix_filename: Path to tabixed bed", "to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Dictionary with keys", "if(link_pos2 < link_pos2): mid_point = link_pos2 + (link_pos1-link_pos2)/2 vertices =", "length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start, end, segment_size,", "list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i if i >= 1. else", "linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy ==", "and gene_names[i] in blacklist): continue left_border = gene_regions[i][0] right_border =", "color for plotting (R,G,B). 
:type segments_Tabix_filename: str :param chrom: Chromosome", "simple rectangles. :param regions: Iterator containig list-like elements with the", "If True legend is plotted, False otherwise, defaults to False.", "offset = merge*offset left = left_merged height = height_merged plt.bar(left,", "ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def", "else if False make more ticks. :type upper: bool, optional", "genomic regions as arcs. :param links_bed: Iterator, that contains bed-like", "start: int :param end: End position of region to be", "defaults to \"normal\". :type g2_id: str, optional :param plot_gene_names: If", "g in range(n_groups): bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions +=", ":type upper: bool, optional :param loc_coordinates: Either of \"up\" |", "elements to be merged. If this value is not equal", "= np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap) for i in range(contact_map_index1,", "that determines the max y position for gene plotting via", "to False. :type plot_legend: bool :param color_g1: Color used for", "+= [patch] patch_description_list += [\"forward strand\"] met_forward = True elif(strand", "original and distance equalized segments n_segments = len(genomic_segments) equalized_region_size =", "rotation: int, optional :return: Nothing to be returned. :rtype: None", "optional :param colors: List of colors used for plotting samples", "that plots genomic regions as simple rectangles. :param regions: Iterator", "If False, no edge is plotted, defaults to False. :type", ":type log_transformed: bool, optional :param plot_points: If True, a point", "= np.log2([i if i >= 1. else 1. for i", ":param legend_loc: Location of the legend. Either of \"lower left\",", "right\", \"upper left\", \"upper right\", default is \"lower right\". 
:type", "flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if i", "i >= 1. else 1. for i in exp_values]) bplot", "matplotlib.cm as cm import matplotlib import tabix import math def", "0.6]) def plotHiCContactMap(contact_map, start, end, segment_size, cmap=\"Greys\", vmin=None, vmax=None, location=\"top\",", "of patches drawn on the ax. 3. patch_description_list is the", "range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location == \"top\"", ":param edge_color: Color of region edge. If False, no edge", "ploidy to be considered as a CNV, defaults to 0.7.", "range(len(left)): if(i % merge == 0 and not (i ==", "line[:5] == \"chrom\"): continue split_line = line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy", "patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed, region_size,", ":param blacklist: Set containing gene ids not to be plotted,", "object containing CNVs from ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file =", "[]) plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start,", "\"up\", plot ticks to upper direction, else if \"down\", plot", ".3} patch_list = None patch_description_list = None tick_positions = []", "of stacked genes. 2. y_pos_dict: Dictionary with keys = gene", "Cs, Unmethylated Cs. 
:type methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome of", "introns_bed region_border_up = int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size = region_border_down-region_border_up", "color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color =", "of segments for which distances shall be equalized (each segment", "bar, showing the part of the genome that is translocated.", "= int(motif[2]) strand = str(motif[3]) arrow_start = motif_start arrow_end =", "optional :return: List of equalized region midpoints. :rtype: list '''", "Rectangle from matplotlib.patches import Arrow from matplotlib.path import Path from", "color = color_loss if(abs(ploidy_dev) > cnv_threshold): rect = Rectangle((current_start, tcn-.2),", "color_loss if(abs(ploidy_dev) > cnv_threshold): rect = Rectangle((current_start, tcn-.2), current_end-current_start, .4,", "extension/4. bplot_g2_pos = left_border + 3*(extension/4.) tick_positions += [left_border +", "color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2),", "= Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0,", ">= 1. else 1. for i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2.,", "of equalized region midpoints. :rtype: list ''' ax = ax", "equalized (each segment is of the form [<chrom>, <start>, <end>])", "otherwise. Default is False. :type plot_legend: bool, optional :param legend_loc:", "if(max_dev is None): max_dev = max([abs(float(i[3])) for i in cnvs_bed])", "created. :type region_bed: :class:`pybedtools.BedTool` :param blacklist: List of gene names,", "structured lists with the following elements: 1. 
Chromosome region1 2.", "plt.gca() # Calculate midpoints of original and distance equalized segments", "is not None else plt.gca() binned_meth_calls = [ [0, 0]", "\"Series\"): exp_values_g1 = list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2", "the expression values of all samples (columns: sample ids; index:", "sample ids; index: gene ids) :type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map:", "End region1 4. Chromosome region2 5. Start region2 6. End", "which distances shall be equalized (each segment is of the", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True legend is", "defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If", "equalizing the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing", "plotted in base pairs. :type region_size: int :param distance_ratio: Minimal", "optional :param color_neutral: Plot color of copy number neutral regions,", "Location of the legend. Either of \"lower left\", \"lower right\",", "Start position of region to be plotted. :type r_start: int", "Plot color of copy number gains, defaults to \"g\". :type", "equalized_region_mid_points = [] for i in range(1, n_segments+1): equalized_region_mid_points +=", "elif(strand == \"-\" and not(met_reverse)): patch_list += [patch] patch_description_list +=", "links between genomic regions as arcs. :param links_bed: Iterator, that", "== 0 else \"NA\") meth_after = (binned_average_meth[i+1] if not i", "dict. :param blacklist: Set containing gene ids not to be", "0.5. :type alpha: float, optional :return: Plots axis. 
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`", "not j == \"NA\" ] binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list)))", "+= [patch] patch_description_list += [\"reverse strand\"] met_reverse = True #", "gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color =", "for which the first column is a ensemble gene id,", "None): '''Functions that plots genomic regions as simple rectangles. :param", "g1_id: ID of g1 used for legend plotting, defaults to", "# Define color for gene plotting strand = str(i[5]) color", "edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start,", "directionality. :param motifs_bed: :class:`pybedtools.BedTool` object containing regions of the TF", "to top, :type location: str, optional :param ax: Axis on", "is drawn, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "= None for line in input_file: if(line[:7] == \"#ploidy\"): ploidy", "contains the intensity values of HiC contacts. :type contact_map: :class:`pandas.DataFrame`", "of the region to be plotted. :type start: str :param", "the plot, False otherwise, default is True :type plot_gene_ids: bool,", "for plotting grouped gene expression (e.g. tumor and normal) on", "= (intensity_value/vmax if intensity_value <= vmax else 1.) facecolor =", "2. Start position 3. end position 4. Beta Value :type", "defaults to \"left\". :type direction: str, optional :param color: Color", "if(not ids is None): g_id = ids[g] else: g_id =", "chrom, start, end, ax = None): '''Function for plotting genomix", "color_g2: Color used for plotting g2 samples expression, defaults to", "if i >= 1. else 1. 
for i in exp_values_g2])],", "+= [[gene_start, gene_end]] break elif(i == max_y_pos): max_y_pos += 1", "[]) def plotMethylationProfile(meth_calls, chrom, start, end, color=\"k\", ax=None): '''Function that", "the gene plot is created. :type region_bed: :class:`pybedtools.BedTool` :param blacklist:", "= int(signal[1]) end = int(signal[2]) value = float(signal[3]) if(value >", "If True, coordinates are reverted to decreasing order. Else, coordinates", "Axis of plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional", "function plotGenes. :param genes_bed: :class:`pybedtools.BedTool` object containing genes to be", "ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\",", "start: Start position of region to be plotted. :type start:", "to be plotted. :type end: str :param ax: Axis used", "i in region]), from_string=True) # Get gene names and regions", "plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function", "linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy == 2): plt.ylim([0, 4.5]) plt.yticks([0, 1,", "in the plot, False otherwise, default is True :type plot_gene_ids:", "if(not blacklist is None and gene_names[i] in blacklist): continue left_border", "meth_calls ], [ float(m[4]) for m in m in meth_calls],", "= [ str(round(i/float(divisor), digits_to_round))+scale for i in ticks ] if(loc_coordinates", "else if \"down\", plot ticks to lower direction, defaults to", "1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c += 1 ax.add_patch(rect) plt.xticks([], [])", "Assumed ploidy of tumor, defaults to 2. 
:type ploidy: int,", "plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000, ax =", "plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([], []) def plotMethylationProfile(meth_calls,", ":class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax if ax is not None", "chromosome, start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function for", "(region_border_down- region_border_up), distance_ratio) if(not y_max is None): max_y_pos = y_max", "j in [meth_before, meth_after] if not j == \"NA\" ]", "region in regions: if(not edgecolor): current_color = color rect =", "facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end) if(location == \"top\"): ax.set_ylim(0, (end-start)/2.)", "str, optional :param offset: Length of intervals, defaults to None.", "3. End position :type regions: iterator :param start: Start position", "plt.plot([start, end], [0.3, 0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks =", "plotted. :type end: int :param color: Color of the genomic", "ticker from matplotlib.patches import Rectangle from matplotlib.patches import Arrow from", "of region size from extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\",", ":param vmax: Maximal value of intensity range to be plotted,", "counter += 1 continue if(counter < len(gene_names)-1): right_border = gene_regions[counter+1][0]", "and values: HUGO GENE SYMBOLs. :type gene_names_map: dict. :param expression_df:", "binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1]", "containing gene regions. :type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points: list of", "the genomic part that is translocated. 
Either of \"left\" (upstream),", "end = int(signal[2]) value = float(signal[3]) if(value > max_signal): max_signal", "[] for i in range(1, n_segments+1): equalized_region_mid_points += [((start+ i*equalized_region_size)-", "path = Path(vertices, codes) path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color, linewidth=.5)", "ax is not None else plt.gca() max_dist = 0 for", "default to None. :type blacklist: set, optional :param ax: Axis", "False otherwise, defaults to True. :type plot_gene_names: bool, optional :param", "scale = None if(first_tick > 1000000): scale = \"Mb\" else:", "ax = None): '''Function that plots links between genomic regions", "plt.text(start, y, gene_name_label, size=5, color = color) plt.xlim([region_border_up, region_border_down]) plt.ylim([0,", "\"linewidth\": .3, \"alpha\":alpha} flierprops = {\"color\": \"k\"} medianprops = {\"color\":", "showfliers=False) color = None if(not colors is None): color =", "end, color). The color field is used to determine the", "y position for gene plotting via function plotGenes. :param genes_bed:", "len(gene_names)-1): right_border = gene_regions[i+1][0] else: right_border = region_right_border current_extension =", "gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None,", "form [<chrom>, <start>, <end>]) :type genomic_segments: list :param start: Start", "gene_name_label = gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5,", "ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \" for i in", ":return: Nothing to be returned. 
:rtype: None ''' # Use", "of g1 samples (columns: sample ids; index: gene ids) :type", "codes) patch = PathPatch(path, facecolor = \"None\", edgecolor = color,", "j] intensity_value = (intensity_value/vmax if intensity_value <= vmax else 1.)", "binned_average_meth_no_missing += [ (float(sum(average_list))/ float(len(average_list))) if len(average_list) > 0 else", "defaults to 0.7. :type cnv_threshold: float, optional :param color_gain: Plot", "plotting, non-transformed values otherwise. :type log_transformed: bool, optional :param plot_points:", "tcn = ploidy color = color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain", "exp_values_g2 = list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1 =", "as plt import pybedtools import pandas as pnd import numpy", "of copy number losses, defaults to \"r\". :type color_loss: str,", "in np.argsort([i[1] for i in genes_bed])] genes_sorted_bed = [genes_bed[i] for", "class:`pandas.DataFrame` :param blacklist: Set containing gene ids not to be", "region to be plotted. :type chrom: str :param start: Start", "plotted, defaults to None. :type vmax: float, optional :param location:", "max_dist = distance mid_point = link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 <", "] binned_average_meth = binned_average_meth_no_missing # Plot average methylation values per", "overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None): '''Function that plots TF motifs as", "str, optional :param color_plus: Color code for plus stranded genes,", "len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos = left_border + extension/4. 
bplot_g2_pos", ":param region_bed: :class:`pybedtools.BedTool` object containing the one region, for which", "None): '''Function that plots arcs from unequal distances of genomic", "end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\", color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for plotting", ":class:`pybedtools.BedTool` object containing introns :type introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool`", ":type plot_legend: bool, optional :param colors: List of colors used", "is None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True,", "else plt.gca() # Get gene names and regions genes_in_region_bed =", "Direction of distance equalization (top_down | bottom_up), defaults to \"top_down\".", ":param regions: Iterator containig list-like elements with the following entries:", "= matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap) for cbin in", "optional :param plot_legend: If True legend is plotted, False otherwise,", "{\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops = {\"color\": \"k\"} medianprops", "from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None, color=\"b\", offset=None, merge=None):", "Chromosome region2 5. Start region2 6. End region2 :type links_bed:", "== \"+\" and not(met_forward)): patch_list += [patch] patch_description_list += [\"forward", "tick.set_rotation(45) tick.set_size(6) for ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list,", "= region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name =", "as the number of groups, defaults to None. 
:type colors:", "iterator :param start: Start position of the region to be", "Calculate midpoints of original and distance equalized segments n_segments =", "\"bottom\" the pyramid points downwards, defaults to top, :type location:", "be plotted in base pairs. :type region_size: int :param distance_ratio:", "0 else 0. ] binned_average_meth = binned_average_meth_no_missing # Plot average", "ploidy: Assumed ploidy of tumor, defaults to 2. :type ploidy:", "+= [gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border", "object containing regions of the TF sited to be plotted.", ":param direction: Direction of distance equalization (top_down | bottom_up), defaults", "legend plotting, defaults to \"normal\". :type g2_id: str, optional :param", "plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom, start,", "if ax is not None else plt.gca() for motif in", "= value if(not offset is None): end = start +", "color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start,", "bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border + extension/2.]", "ax = None): '''Functions that plots genomic regions as simple", "color_loss: str, optional :param color_neutral: Plot color of copy number", ":param color_plus: Color of plus stranded TF regions, defaults to", "plotted. :type start_r: int :param end_r: End position of the", "ax is not None else plt.gca() c = 0 for", "float, optional :param cmap: Colormap used for plotting CNVs, defaults", "1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \"", "pairs. 
:type region_size: int :param distance_ratio: Minimal distance between two", "= str(motif[3]) arrow_start = motif_start arrow_end = motif_end color=color_plus dx", "= ax if ax is not None else plt.gca() region_bed", "underwent, the genes will be stacked. :type distance_ratio: float :return:", "position 4. Beta Value :type meth_calles: iterator :param chrom: Chromosome", "region to be plotted. :type end: int :param bin_size: size", "cytosines Or 1. Chromsome 2. Start position 3. end position", "optional :param direction: Direction of distance equalization (top_down | bottom_up),", "+= 1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1,", "\"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome, start, end,", "drawn, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing", "between genomic regions as arcs. :param links_bed: Iterator, that contains", "of the TF sited to be plotted. :type motifs_bed: :class:`pybedtools.BedTool`", "values, defaults to \"k\". :type color: str, optional :param ax:", "None if(not ids is None): g_id = ids[g] else: g_id", "elif(current_extension < extension): extension = float(current_extension) boxprops = {\"color\": \"k\",", "distances shall be equalized (each segment is of the form", "color=color, linewidth=1) if(revert_coordinates): ticks = [ start + end-i for", "= True elif(strand == \"-\" and not(met_reverse)): patch_list += [patch]", "cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following entries: 1. Chromosome,", "expression values of g2 samples (columns: sample ids; index: gene", ":param genes_bed: :class:`pybedtools.BedTool` object containing gene regions. 
:type genes_bed: :class:`pybedtools.BedTool`", "segment_start = int(segment[1]) segment_end = int(segment[2]) color = tuple([ float(i)/256.", "blacklist is None and gene_names[i] in blacklist): continue left_border =", "(top_down | bottom_up), defaults to \"top_down\". :type direction: str, optional.", "[\"0\", \"1\", \"2\", \"3\", \"4\"], size=6) elif(ploidy == 4): plt.ylim([0,", "1.) facecolor = colormap(intensity_value) patch = matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch)", "size from extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\", \"linewidth\": .3,", "plotting g1 samples expression, defaults to \"#fb8072\". :type color_g1: str,", "The color field is used to determine the color for", "methylation_bed: Methylation calls. Following fields must be included: Chrom, Start,", "if \"down\", plot ticks to lower direction, defaults to \"up\".", "True. :type plot_gene_names: bool, optional :param position_gene_names: Either of \"top\",", "maps as pyramid plots :param contact_map: Matrix that contains the", "+ end-i for i in ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels)", "if(plot_points): x_positions = [ (bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i in", "color of the boxplots boxes, defaults to 0.5. :type alpha:", "ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\", ax =", "start_r, end_r, TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function that plots a", "Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r])", "for which the gene plot is created. :type region_bed: :class:`pybedtools.BedTool`", "a gene region scale retaining the position of genes. 
:param", "5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6, 6], color=color_threshold, linestyle=\"--\",", "fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function", "continue n_groups = len(groups) for g in range(n_groups): bplot_pos =", "defaults to 0. :type rotation: int, optional :return: Nothing to", "def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1, color=\"k\", ax = None):", "Nothing to be returned :rtype: None ''' # Use given", "extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops =", "elements: 1. Chromosome region1 2. Start region1 3. End region1", ".3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = None patch_description_list =", "Dictionary with keys = names of segments, and values patch", "if(i < len(gene_names)-1): right_border = gene_regions[i+1][0] else: right_border = region_right_border", "= region_mid_points[i] equalized_region_mid_point = equalized_region_mid_points[i] codes = [] vertices =", "color_neutral: str, optional :param ax: Axis used for plotting. :type", "max_signal = value if(not offset is None): end = start", "ax if ax is not None else plt.gca() TX_start =", "list(exp_values_g1.iloc[0, :]) exp_values_g2 = expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2", "met_forward = False met_reverse = False # Plot Introns for", "str, optional :param ax: Axis of plot, defaults to None.", "ax = None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that plots", "methylation_bed: # Determine bin position = int(element[1]) if(position < start", ":type color: str, optional :param direction: Direction of distance equalization", "to be plotted. 
:type chrom_r: str :param start_r: Start position", "Determine first tick position first_tick = start+(tick_size-start%tick_size) ticks = []", "plotting max_y_pos, y_pos_dict = determineYPosGene(genes_in_region, (region_border_down- region_border_up), distance_ratio) if(not y_max", "= [] for e in genes_in_region_bed: gene_name_ens = str(e[3]) if(not", "2): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "dict ''' ax = ax if ax is not None", "(float(m[1])+float(m[2]))/2. for m in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4])", "ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for i in gene_names_clean]))) for tick", "of \"top\" | \"bottom\". If location == \"top\", the pyramid", "else: plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1,", "+= [gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]]", "converts them to pybedtools.BedTool object :param input_filename: Full path to", "[] gene_names_clean = [] counter=0 for gene_name in gene_names: left_border", "as np import tabix import matplotlib.ticker as ticker from matplotlib.patches", "percentage of region size from extension extension=extension-(region[2]-region[1])*.01 boxprops = {\"color\":", "color: str, optional :param edge_color: Color of region edge. If", "expression_df_g2: :class:`pandas.Dataframe` containing the expression values of g2 samples (columns:", "upper direction, else if \"down\", plot ticks to lower direction,", "HUGO GENE SYMBOLs. :type gene_names_map: dict. :param expression_df: class:`pandas.DataFrame` object", "to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True", "= {} y_level_dict = {} max_y_pos = 0 for interval", "ax if ax is not None else plt.gca() tick_size =", "bin # Define Colormap cmap = cm.bwr norm = matplotlib.colors.Normalize(vmin=0.,", "is not None else plt.gca() max_dist = 0 for e", "def plotGenomicSegments(segments_list, chrom, start, end, ax = None): '''Function for", "genes_bed: :class:`pybedtools.BedTool` object containing TXstart, and TXend of genes. :type", "[] for e in genes_in_region_bed: gene_name_ens = str(e[3]) gene_names +=", "= region_right_border current_extension = right_border-left_border if(current_extension == 0.): continue if(extension", "== len(binned_average_meth)-1 else \"NA\") average_list = [ j for j", "import tabix import math def plotGenes(genes_bed, exons_bed, introns_bed, region_bed, blacklist=None,", "SYMBOLs. :type gene_names_map: dict. :param expression_df: class:`pandas.DataFrame` object containing the", "i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist = True, boxprops=boxprops, flierprops=flierprops,", "end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000, overhang=0,", "current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5,", "extension/2.] gene_names_clean += [gene_name] exp_values_g1 = expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ ==", "distance equalization (top_down | bottom_up), defaults to \"top_down\". :type direction:", "be plotted, defaults to None, :type blacklist: set, optional :param", "Either of \"lower left\", \"lower right\", \"upper left\", \"upper right\",", "defaults to \"top_down\". :type direction: str, optional. 
:param ax: Axis", "and gene_map[gene_name] in blacklist): continue # Define color for gene", "int(end)]) if(ploidy == 2): plt.ylim([0, 4.5]) plt.yticks([0, 1, 2, 3,", "motif_start arrow_end = motif_end color=color_plus dx = head_length if(strand ==", "all gene ids will be included in the plot, False", "(e.g. tumor and normal) on a gene region scale retaining", "binned_average_meth_no_missing # Plot average methylation values per bin # Define", "expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 = list(exp_values_g1) else: exp_values_g1", "Rectangle((start, y-.03), end-start, .06, color=color, capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand ==", "distances, defaults to \"k\". :type color: str, optional :param direction:", "Iterator containing list-like elements with the following entries: 1. Chromsome", "drawn on the ax. 3. patch_description_list is the list of", "= float(current_extension) elif(current_extension < extension): extension = float(current_extension) boxprops =", "gene_names: left_border = gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not blacklist is", "be plotted, defaults to 1. :type alpha: float, optional. :param", "the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing gene", "be plotted, default to None. :type blacklist: set, optional :param", "= expression_df_g2.loc[gene_name, :] if(type(exp_values_g2).__name__ == \"Series\"): exp_values_g2 = list(exp_values_g2) else:", "used for legend plotting, defaults to None. Number of ids", "True, the HUGO GENE SYMBOLs will be shown, else the", "idx in np.argsort([i[1] for i in genes_bed])] genes_sorted_bed = [genes_bed[i]", "if(vmin is None): vmin = 0 if(vmax is None): vmax", "ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function for plotting", "position of the region to be plotted. 
:type start_r: int", ":param color_minus: Color code for minus stranded genes, default is", "left\", \"upper right\", default is \"lower right\". :type legend_loc: str,", "def distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\", ax = None): '''Function", "segments n_segments = len(genomic_segments) equalized_region_size = (end-start) if(n_segments > 0):", "color_forward if(strand == \"-\"): color = color_reverse border_distance_down = region_border_down-start", ":rtype: None ''' ax = ax if ax is not", "to \"left\". :type direction: str, optional :param color: Color of", "else: exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i if i", "patch_list = None patch_description_list = None tick_positions = [] gene_names_clean", "False met_reverse = False # Plot Introns for i in", "color=m.to_rgba(binned_average_meth[cbin])) ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([], [])", ":type end: int :param color: Color of the genomic scales", "+= [ (float(sum(average_list))/ float(len(average_list))) if len(average_list) > 0 else 0.", "values of all samples (columns: sample ids; index: gene ids).", "to be plotted. :type r_end: int :param ax: Axis of", "chrom: Chromosome of region to be plotted. :type chrom: str", "= contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None): vmin = 0 if(vmax", "exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object containing introns :type introns_bed:", "optional :param ax: Axis used for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`,", ":param chrom_r: Chromosome of the region to be plotted. :type", "1 else: current_color = color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]),", "the position of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TXstart,", "object containing: 1. 
Chromosome 2. Start postion 3. End position", "genes will be stacked. :type distance_ratio: float :return: Tuple of", "max_y_pos+1.5, patch_list, patch_description_list, where 1. max_y_pos+1.5 is the max_y_position +", ":param segments_tabix_filename: Path to tabixed bed file containing (chrom, start,", "to be plotted, defaults to \"#cbebc4\". :type color: str, optional", "str(ploidy_dev), split_line[5], \"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in", "following elements: 1. Chromosome region1 2. Start region1 3. End", "be stacked, default is 0.1. :type distance_ratio: float, optional :param", "included: Chrom, Start, End, Methylated Cs, Unmethylated Cs. :type methylation_bed:", "\"Series\"): exp_values_g2 = list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1", "meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([], [])", "elif(chrom == \"24\"): chrom = \"Y\" cnv_bed_list += [ [chrom,", ":type color_plus: str, optional. :param color_minus: Color code for minus", "contacts. :type contact_map: :class:`pandas.DataFrame` :param start: Chromosomal start position of", ":param expression_df_g1: :class:`pandas.Dataframe` containing the expression values of g1 samples", "arrow_end = motif_end color=color_plus dx = head_length if(strand == \"-\"):", "[patch] patch_description_list += [\"reverse strand\"] met_reverse = True # Plot", "following entries: 1. Chromosome, 2. Start Position, 3. End Position,", "linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([], [])", "colormap = plt.get_cmap(cmap) for i in range(contact_map_index1, contact_map_index2): y_range =", "TF regions, defaults to \"#fb8072\". :type color_minus: str, optional :param", "ax is not None else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not", "to be plotted. 
:type end: int :param head_width: Width of", "[.3, .0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5,", "plotGenes(genes_bed, exons_bed, introns_bed, region_bed, blacklist=None, gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None,", "motifs_bed: :class:`pybedtools.BedTool` object containing regions of the TF sited to", "matplotlib.patches import PathPatch import matplotlib.cm as cm import matplotlib import", ":param position_gene_names: Either of \"top\", or \"bottom\", defaults to \"bottom\".", "== \"Mb\"): digits_to_round = int(6-np.log10(tick_size)) divisor = 1000000 else: digits_to_round", "def plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000, ax = None): '''Function", "= [] for i in range(len(left)): if(i % merge ==", "plot_gene_ids: bool, optional :param y_max: Max y value in the", "medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if i >=", ":param plot_legend: If True legend is plotted, False otherwise, defaults", "of intensity range to be plotted, defaults to None. :type", "to \"#cbebc4\". :type color: str, optional :param edge_color: Color of", "ax if ax is not None else plt.gca() max_dist =", "Start region2 6. End region2 :type links_bed: iterator :param chrom_r:", "= max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]] break else: continue return", "ax: Axis of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color:", "swept back (0 overhang means triangular shape). Can be negative", "-.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c += 1 ax.add_patch(rect)", "i.e. introns exons of genes. 
:param genes_bed: :class:`pybedtools.BedTool` object containing", "average methylation values per bin # Define Colormap cmap =", "used for plotting g1 samples expression, defaults to \"#fb8072\". :type", "groups: List of lists containing the IDs of the different", "color_minus: Color of plus stranded TF regions, defaults to \"#fb8072\".", "elements, defaults to \"k\". :type color: str, optional :param ax:", "int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance > max_dist):", "[ (float(sum(average_list))/ float(len(average_list))) if len(average_list) > 0 else 0. ]", "int, optional :param merge: Number of elements to be merged.", "if ax is not None else plt.gca() patches_dict = {}", "None ''' ax = ax if ax is not None", "TX_pos rect = Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0)", "and not (i == 0)): left_merged += [lefts[0]] lefts =", "value if(not offset is None): end = start + offset", "None else plt.gca() binned_meth_calls = [ [0, 0] for i", "tumor and normal) on a gene region scale retaining the", "= ax if ax is not None else plt.gca() colors", "Color of points representing methylation values, defaults to \"k\". :type", "if(vmax is None): vmax = np.percentile(contact_map, 99.9) colormap = plt.get_cmap(cmap)", ":class:`pybedtools.BedTool` :param chrom: Chromosome of region to be plotted. :type", "\\ drawn on the ax. :rtype: list \"\"\" ax =", "colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function for plotting", "for i in genes_in_region: start = int(i[1]) gene_name = str(i[3])", "in exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops,", "plotted. :type chrom: str :param start: Start position of the", "to plot, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start, end]) def plotTX(chrom_r,", "g2 samples expression, defaults to \"#80b1d3\". :type color_g2: str, optional", "motif_start color = color_minus dx = -1.*head_length plt.arrow(arrow_start, .5, dx,", "= int(i[1]) gene_name = str(i[3]) if(not blacklist is None and", "None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned.", "in the gene plot. If not set, then y_max is", "None :type blacklist: list, optional :param plot_gene_ids: If True, all", "of g2 samples (columns: sample ids; index: gene ids) :type", "rect plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([], []) return patches_dict def", ":type blacklist: set, optional :param ax: Axis used for plotting,", "of the genomic part that is translocated. Either of \"left\"", "list of integer values containing center positions of genes. :type", "positions=[bplot_g2_pos], widths=extension/2., patch_artist = True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops,", "bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list", "i in range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location", "[int(idx) for idx in np.argsort([i[1] for i in genes_bed])] genes_sorted_bed", "the expression values of g2 samples (columns: sample ids; index:", "== \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1] if", "= [] ploidy = None for line in input_file: if(line[:7]", "and TX end of genes. 
:type genes_bed: :class:`pybedtools.BedTool` :param exons_bed:", "plotted ([<chrom>, <start>, <end>]). :type region: list :param groups: List", "<start>, <end>]) :type genomic_segments: list :param start: Start position of", "mapping between gene ids :param gene_name_mapping_file: Path to a tab", "gene ids will be included in the plot, False otherwise,", "defaults to \"r\". :type color_loss: str, optional :param color_neutral: Plot", "int :param end: End position on chromosome. :type end: int", "6. End region2 :type links_bed: iterator :param chrom_r: Chromosome of", "segment_end = int(segment[2]) color = tuple([ float(i)/256. for i in", "gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False,", "exons of genes. :type exons_bed: :class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object", "the arrow head as proportion of the arrow, defaults to", "genes. :param genes_bed: :class:`pybedtools.BedTool` object containing TX start, and TX", "plotted, False otherwise, defaults to False. :type plot_legend: bool :param", "from unequal distances of genomic segments to equal distances. :param", "if(direction == \"top_down\"): codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices", "bin position = int(element[1]) if(position < start or position >", "str(i[3]) gene_name_label = gene_name if(not gene_map is None): gene_name_label =", "for plotting, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "copy number losses, defaults to \"r\". :type color_loss: str, optional", "to be plotted. :type start: str :param end: End position", "color_reverse = color_minus max_y_pos = None if(not len(genes_in_region) == 0):", "[3, 3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold,", "3. 
patch_description_list is the list of descriptions for the patches", "[4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5, 5], color=color_threshold,", "int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c += 1 else: current_color", "for i in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5)", "link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance", "revert_coordinates: bool, optional :param rotation: Rotational angle of coordinate strings,", "boxprops = {\"color\": \"k\", \"linewidth\": .3, \"alpha\":alpha} flierprops = {\"color\":", "height, offset, color = color, edgecolor = color) plt.xlim(r_start, r_end)", "= float(current_extension) boxprops = {\"color\": \"k\", \"linewidth\": .3} flierprops =", "linewidth=1) if(revert_coordinates): ticks = [ start + end-i for i", "TF regions, defaults to \"#80b1d3\". :type color_plus: str, optional :param", "[] vertices = [] if(direction == \"top_down\"): codes = [Path.MOVETO,", "plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False, alpha=1, ax = None): '''Functions", ":param end_r: Chromosomal end positiont of the region to be", "== \"-\"): color = color_reverse border_distance_down = region_border_down-start if(start <", "int :param end_r: Chromosomal end positiont of the region to", "the following entries: 1. Chromsome 2. Start position 3. end", ":class:`pybedtools.BedTool` object containing the region to be plotted :type region_bed:", "(i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]),", ":type end: int :param color: Color of points representing methylation", "1. :type alpha: float, optional. 
:param ax: Axis of plot,", "color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start, y-.03), end-start, .06,", "= None): '''Functions that plots genomic regions as simple rectangles.", "(equalized_region_mid_point, 0)] else: codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices", "current_end = int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4]) #", "If True plot legend, False otherwise, defaults to False. :type", "object containing gene regions. :type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points: list", "that is plotted), defaults to 1000. :type head_length: int, optional", "[5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6, 6], color=color_threshold,", "plotMethylationProfileHeat(methylation_bed, chrom, start, end, bin_size=1000, ax = None): '''Function for", "contact maps as pyramid plots :param contact_map: Matrix that contains", "plot_legend=False, colors=None, ids=None, plot_gene_names=True, position_gene_names=\"bottom\", log_transformed=True, plot_points=False, alpha=.5): '''Function for", "the region to be plotted. :type end: int :param head_width:", "[[gene_start, gene_end]] break else: continue return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename):", "as heatmap :param methylation_bed: Methylation calls. Following fields must be", "= plt.get_cmap(cmap) for i in range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1),", "iterator :param chrom_r: Chromosome of the region to be plotted.", "Start position 3. end position 4. Number methylated cytosines 5.", "more ticks. 
:type upper: bool, optional :param loc_coordinates: Either of", "rotation=0): '''Function that plots genomic coordinates in a linea fashion.", "divisor = 1000000 else: digits_to_round = int(5-np.log10(tick_size)) divisor = 100000", "ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for i in", "gene_name in blacklist): counter += 1 continue n_groups = len(groups)", "defaults to \"#fb8072\". :type color_minus: str, optional :param ax: Axis", "markersize=3) g_id = None if(not ids is None): g_id =", "of the form [<chrom>, <start>, <end>]) :type genomic_segments: list :param", "0.7. :type cnv_threshold: float, optional :param cmap: Colormap used for", "number of stacked genes, default is None. :type y_max: bool,", "ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file = open(input_filename, \"r\") cnv_bed_list =", "= None): '''Function that plots links between genomic regions as", "the boxplot, no points are plotted otherwise, defaults to False.", "plot. If not set, then y_max is the max number", "expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing the expression values of", "<= vmax else 1.) facecolor = colormap(intensity_value) patch = matplotlib.patches.PathPatch(path,", "of g1 used for legend plotting, defaults to \"tumor\". :type", "ax is not None else plt.gca() region_bed = pybedtools.BedTool(\"\\t\".join([str(i) for", "g2_id=\"normal\", plot_gene_names=True): '''Function for plotting paired gene expression (e.g. tumor", "0 and not max_y_pos in y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i]", "to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of equalized", "to 0, than merge elements will be averaged an plotted,", "2): plt.ylim([0, 4.5]) plt.yticks([0, 1, 2, 3, 4], [\"0\", \"1\",", "lower direction, defaults to \"up\". 
:type loc_coordinates: str, optional :param", ".2, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) # Plot thresholds color_threshold=(189./255.,", "log transformed values for plotting, non-transformed values otherwise. :type log_transformed:", "linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy == 2): plt.ylim([0, 4.5]) plt.yticks([0,", "[] if(not merge is None): heights = [] lefts =", "rect = Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect)", "shall be equalized (each segment is of the form [<chrom>,", "6.5]) plt.yticks([0, 2, 4, 6], [\"0\", \"2\", \"4\", \"6\"], size=6)", "all samples (columns: sample ids; index: gene ids). :type expression_df:", "Plot Exons for i in exons_in_region: start = int(i[1]) end", "[gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border =", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned :rtype: None", "plot, default is None :type blacklist: list, optional :param plot_gene_ids:", "Alpha value for the background color of the boxplots boxes,", "Axis of plot :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color", "list of patches drawn on the ax. 3. patch_description_list is", "the region to be plotted :type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1:", "rotation: Rotational angle of coordinate strings, defaults to 0. :type", "= tuple([ float(i)/256. for i in str(segment[-1]).split(\",\") ]+[1]) segment_type =", "int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) # Determine minimal extension of barplot", "be negative or greater than one. Defaults to 0. :type", "part that is translocated. 
Either of \"left\" (upstream), or \"right\"", "Width of the arrow head as proportion of the arrow,", "\"chrom\"): continue split_line = line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom =", "= [] gene_regions = [] for e in genes_in_region_bed: gene_name_ens", "merge == 0 and not (i == 0)): left_merged +=", "6], [\"0\", \"2\", \"4\", \"6\"], size=6) plt.xticks(rotation=45) def plotCNVsHeat(cnvs_bed, chromosome,", ":type color: str, optional. :param ax: Axis where the plot", "import pandas as pnd import numpy as np import tabix", "== \"top\"): ax.xaxis.set_ticks_position(\"top\") ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \"", "be used for plotting HiC intensities, defaults to \"Greys\". :type", "\"down\". If \"up\", plot ticks to upper direction, else if", "used for plotting g2 samples expression, defaults to \"#80b1d3\". :type", "True plot legend, False otherwise, defaults to False. :type plot_legend:", "to 0.7. :type cnv_threshold: float, optional :param cmap: Colormap used", "in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend):", "# Define midpoint of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices", "plotCNVsHeat(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function", "heights += [height[i]] lefts += [left[i]] if(not i % merge", "alpha: float, optional. :param ax: Axis of plot, defaults to", "SYMBOLs are hidden. :type plot_gene_names: bool. 
:return: Axis on which", "a mapping between gene ids :param gene_name_mapping_file: Path to a", "i == 0 else \"NA\") meth_after = (binned_average_meth[i+1] if not", "cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function for plotting CNV segments as", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True, a legend", "i in exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops,", "patch_list = [] patch_description_list = [] tick_positions = [] gene_names_clean", "r_start: Start position of region to be plotted. :type r_start:", "exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops,", "direction, defaults to \"up\". :type loc_coordinates: str, optional :param revert_coordinates:", "set, then y_max is the max number of stacked genes,", "plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function that plots", "where 1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines", "None ''' # Use given axis for plotting ax =", "vmax: Maximal value of intensity range to be plotted, defaults", "returned :rtype: None ''' # Use given axis for plotting", "colors[g] else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions = [", "= False met_reverse = False # Plot Introns for i", "thresholds color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy == 2): plt.plot([int(start), int(end)],", "ACESeq (\"*most_important*\") files and converts them to pybedtools.BedTool object :param", "= [] height = [] for signal in chip_signals: start", "= hugo_gene_symbol gene_name_mapping_file.close() return gene_map def plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2,", "of elements to be merged. 
If this value is not", "plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([], []) plt.yticks([], []) def readACESeqAsBed(input_filename):", "the region to be plotted. :type end_r: int :param TX_pos:", "> y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i]", "Color of plus stranded TF regions, defaults to \"#fb8072\". :type", "for e in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start, r_end,", "of the genomic region. :type end: int :param color: Color", "with keys: ENSEMBL GENE IDs, and values: HUGO GENE SYMBOLs.", ":type end: int :param bin_size: size of bin to average", "ax is not None else plt.gca() for interval in cnvs_bed:", "r_end: End position of region to be plotted. :type r_end:", "else 1. for i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist =", "is not None else plt.gca() n_entries = len(meth_calls[0]) if(n_entries ==", "(columns: sample ids; index: gene ids) :type expression_df_g2: :class:`pandas.DataFrame` :param", "in motifs_bed: motif_start = int(motif[1]) motif_end = int(motif[2]) strand =", "None): vmin = 0 if(vmax is None): vmax = np.percentile(contact_map,", ":param genes_bed: :class:`pybedtools.BedTool` object containing TX start, and TX end", "unmethylated cytosines Or 1. Chromsome 2. Start position 3. end", ":type gene_name_mapping_file: str :return: Dictionary containing the gene id mapping.", "float :return: Tuple of 1. max_y_pos: Defines the number of", "Length of the arrow in bp (depends on the region", "start = region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name", "= float(interval[3]) tcn = float(interval[4]) # Smooth tcn, if ploidy_dev", "Position, 3. End Position, 4. Deviation from ploidy, 5. 
True", "color=\"k\", ax = None): '''Function that plots arcs from unequal", "make more ticks. :type upper: bool, optional :param loc_coordinates: Either", ":param color: color of bars, defaults to \"b\". :type color:", "Smooth tcn, if ploidy_dev is smaller than cnv_threshold if(abs(ploidy_dev) <", "ax: Axis used for plotting, defaults to None. :type ax:", "m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap) for cbin", "containing gene ids not to be plotted, default to None.", "int :param color: Color of the genomic scales elements, defaults", "location == \"top\", the pyramid points upwards, else if location", "'''Function that plots bedGraph like iterators. :param chip_signals: Iterator for", "of the legend. Either of \"lower left\", \"lower right\", \"upper", "\"up\"): plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], .4,", ":type gene_names_map: dict. :param blacklist: Set containing gene ids not", "binned_average_meth_no_missing += [binned_average_meth[i]] else: meth_before = (binned_average_meth[i-1] if not i", "(e.g. tumor and normal) on a gene region scale equalizing", "midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.), (midpoint[0]-segment_size/2., midpoint[1]) ]", "ax if ax is not None else plt.gca() # Calculate", "number neutral regions, defaults to \"k\". :type color_neutral: str, optional", "of region to be plotted. :type end: int :param color:", "color_plus: str, optional. :param color_minus: Color code for minus stranded", "= y_max # Plot Exons for i in exons_in_region: start", "if(not patch_saved): patch_saved=True patch_list = [bplot_g1[\"boxes\"][0], bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id,", "g_id = ids[g] else: g_id = \"group \"+str(g) if(not g_id", "be included: Chrom, Start, End, Methylated Cs, Unmethylated Cs. :type", "are plotted, default is None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param", "linestyle='None', markersize=1, alpha=.5) elif(n_entries == 4): plt.plot([ (float(m[1])+float(m[2]))/2. for m", "plot legend, False otherwise, defaults to False. :type plot_legend: bool,", ":type distance_ratio: float, optional :param ax: Axes instance on which", "to 1000. :type bin_size: int, optional :param ax: Axis to", "of plus stranded TF regions, defaults to \"#fb8072\". :type color_minus:", "= expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 = list(exp_values_g1) else:", "defaults to \"#80b1d3\". :type color_g2: str, optional :param g1_id: ID", ":type color_gain: str, optional :param color_loss: Plot color of copy", "defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param upper: If", "if(strand == \"+\" and not(met_forward)): patch_list += [patch] patch_description_list +=", "= 100000 tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale for i in", "plotted, False otherwise. Default is False. :type plot_legend: bool, optional", "grouped gene expression (e.g. tumor and normal) on a gene", "list, optional. :param plot_gene_names: True if gene names shall be", "contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location == \"top\" else", "will be included in the plot, False otherwise, default is", "gene_mid_points[counter]+extension/2 if(not blacklist is None and gene_name in blacklist): counter", "rect = Rectangle((start, y-.2), end-start, .4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect)", "= binned_average_meth_no_missing # Plot average methylation values per bin #", "= head_length if(strand == \"-\"): arrow_start = motif_end arrow_end =", "if(position < start or position > end): continue n_meth =", "plots links between genomic regions as arcs. 
:param links_bed: Iterator,", "gene_name_mapping_file = open(gene_name_mapping_filename, \"r\") gene_map = {} for line in", "Color of the genomic scales elements, defaults to \"k\". :type", "color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha)", "2, 3, 4], [\"0\", \"1\", \"2\", \"3\", \"4\"], size=6) elif(ploidy", "to False. :type plot_legend: bool, optional :param colors: List of", "''' input_file = open(input_filename, \"r\") cnv_bed_list = [] ploidy =", "range(max_y_pos+1): if(i == 0 and not max_y_pos in y_level_dict): y_pos_dict[gene_name]", "translocated. :param chrom_r: Chromosome of the region to be plotted.", "linewidth=.5) plt.plot([int(start), int(end)], [5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)],", "= [ j for j in [meth_before, meth_after] if not", "position 4. Number methylated cytosines 5. Number unmethylated cytosines Or", "if(not len(genes_in_region) == 0): # Determine y positions of genes", "< extension): extension = float(current_extension) boxprops = {\"color\": \"k\", \"linewidth\":", "for e in genes_in_region_bed: gene_name_ens = str(e[3]) gene_names += [gene_names_map[gene_name_ens]]", "= PathPatch(path, facecolor = \"None\", edgecolor = color, lw =", "if ax is not None else plt.gca() binned_meth_calls = [", "start: Start position of the region to be plotted. :type", "y_pos_dict[gene_name] = i y_level_dict[i] += [[gene_start, gene_end]] break elif(i ==", ":param distance_ratio: Minimal distance between two genes, as ratio of", "end: End position of the genomic region. :type end: int", "color_plus: Color code for plus stranded genes, default is \"#80b1d3\".", "chromosome. 
:type end: int :param ploidy: Assumed ploidy of tumor,", "pnd import numpy as np import tabix import matplotlib.ticker as", "ax.add_patch(rect) # Plot thresholds color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy ==", "= len(meth_calls[0]) if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2. for m in", "were called. :type segment_size: int :param cmap: Name of the", "position 3. End position :type regions: iterator :param start: Start", "i >= 1. else 1. for i in exp_values_g2])], positions=[bplot_g2_pos],", "+= [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) #", "i in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for", "the ax. :rtype: list \"\"\" ax = ax if ax", "is not None else plt.gca() max_signal = 0 left =", "color_loss=\"r\", color_neutral=\"k\", ax=None): '''Function for plotting CNV segments :param cnvs_bed:", "is created. :type region_bed: :class:`pybedtools.BedTool` :param blacklist: List of gene", "(upstream), or \"right\" (downstream), defaults to \"left\". :type direction: str,", "0 for e in links_bed: link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2 link_pos2 =", ":type chrom: str :param start: Start position of region to", ":type head_length: int, optional :param overhang: Fraction that the arrow", "(0 overhang means triangular shape). Can be negative or greater", "minimal extension of barplot extension=None for i in range(len(gene_regions)): if(not", "boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color = None if(not", "Use given axis for plotting ax = ax if ax", "number losses, defaults to \"r\". 
:type color_loss: str, optional :param", "plt.plot([start, end], [0, 0], linestyle=\"-\", color=color, linewidth=1) else: plt.plot([start, end],", ":class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the region to be", "= left_merged height = height_merged plt.bar(left, height, offset, color =", "exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i if i >=", "chrom = \"Y\" cnv_bed_list += [ [chrom, split_line[1], split_line[2], str(ploidy_dev),", "for i in range(len(gene_regions)): if(not blacklist is None and gene_names[i]", "color: Color of points representing methylation values, defaults to \"k\".", "ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of equalized region midpoints. :rtype:", "containing the region to be plotted ([<chrom>, <start>, <end>]). :type", "otherwise, defaults to False. :type plot_legend: bool, optional :param colors:", "mid_point = link_pos1 + (link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point =", "bool, optional :param plot_points: If True, a point per expression", "defaults to None :type vmin: float, optional :param vmax: Maximal", "called. :type segment_size: int :param cmap: Name of the colormap", "CNV, defaults to 0.7. :type cnv_threshold: float, optional :param color_gain:", "genes, default is None. :type y_max: bool, optional :param distance_ratio:", "ax=None): '''Function for plotting CNV segments :param cnvs_bed: :class:`pybedtools.BedTool` object", "gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\", g1_id=\"tumor\", g2_id=\"normal\", plot_gene_names=True): '''Function", "color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect = Rectangle((current_start,", "to plot CNVs. :type chromosome: str :param start: Start position", "boxplot, no points are plotted otherwise, defaults to False. 
:type", ":param end: End position of the region to be plotted.", "capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect = Rectangle((current_start, tcn-.1), current_end-current_start, .2,", "right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for plotting gene structures, i.e. introns", "split_line[5], \"+\"] ] input_file.close() return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]),", "merge elements will be averaged an plotted, defaults to 0.", "from ACESeq (\"*most_important*\") files and converts them to pybedtools.BedTool object", "genomic segments to equal distances. :param genomic_segments: List of segments", "or \"right\" (downstream), defaults to \"left\". :type direction: str, optional", "int(end)], [5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6, 6],", "= 1000000 else: digits_to_round = int(5-np.log10(tick_size)) divisor = 100000 tick_labels", "exp_values_g1 = list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0, :]) exp_values_g2 =", "line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0] gene_map[ensembl_gene_id] = hugo_gene_symbol", "addition to the boxplot, no points are plotted otherwise, defaults", "int, optional :param cnv_threshold: Minimal deviation from ploidy to be", "genes_in_region_bed: gene_name_ens = str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]),", "1. Chromosome, 2. Start Position, 3. End Position, 4. Deviation", "start, end, name, score, strand, start, end, color). 
The color", "offset is None): end = start + offset left +=", "= int(interval[1]) current_end = int(interval[2]) ploidy_dev = float(interval[3]) tcn =", "{\"color\": \"k\", \"linewidth\": .3} whiskerprops = {\"color\": \"k\", \"linewidth\": .3}", "region_border_up = int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward", "max_dev=None, ax=None): '''Function for plotting CNV segments as heatmap :param", "be returned. :rtype: None ''' ax = ax if ax", "optional :param plot_gene_ids: If True, all gene ids will be", "CNVs from ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file = open(input_filename, \"r\")", "of the region to be plotted. :type chrom_r: str :param", "plot, defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing", "# Plot Gene Names if(plot_gene_ids): for i in genes_in_region: start", "GENE SYMBOLs are hidden. :type plot_gene_names: bool. :return: Axis on", "Gene Names if(plot_gene_ids): for i in genes_in_region: start = int(i[1])", "postion 3. End position 4. Value to be plotted as", "-1.*max_dev elif(tcn > max_dev): tcn = max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev))", "considered as a CNV, defaults to 0.7. :type cnv_threshold: float,", ">= 1. else 1. for i in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2.,", "1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c += 1 else: current_color =", "y_max: bool, optional :param distance_ratio: Minimal distance between two genes,", "\"r\") gene_map = {} for line in gene_name_mapping_file: split_line =", "Defines the number of stacked genes. 2. 
y_pos_dict: Dictionary with", "plt.gca() binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1))", "contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None): vmin = 0 if(vmax is", "offset=None, merge=None): '''Function that plots bedGraph like iterators. :param chip_signals:", "determines the max y position for gene plotting via function", "part of the genome that is translocated. :param chrom_r: Chromosome", "-1.*head_length plt.arrow(arrow_start, .5, dx, 0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\",", "defaults to 0.5. :type alpha: float, optional :return: Plots axis.", "copy number gains, defaults to \"g\". :type color_gain: str, optional", "regions as arcs. :param links_bed: Iterator, that contains bed-like structured", "input_file = open(input_filename, \"r\") cnv_bed_list = [] ploidy = None", "defaults to 1000. :type head_length: int, optional :param overhang: Fraction", "(bplot_pos+ (i-.5)* ((2*extension)/(float(n_groups)*3))) for i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values,", "import matplotlib.pyplot as plt import pybedtools import pandas as pnd", "Default is False. :type plot_legend: bool, optional :param legend_loc: Location", "to tabixed bed file containing (chrom, start, end, name, score,", "patch_list += [bplot[\"boxes\"][0]] patch_description_list += [g_id] counter += 1 ax.set_xlim(region_left_border,", "for i in exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True,", "TX_end = end_r if(direction == \"left\"): TX_start = start_r TX_end", "Start postion 3. End position 4. Value to be plotted", "the color for plotting (R,G,B). :type segments_Tabix_filename: str :param chrom:", "the region to be plotted. :type end: int :param color:", "plotted. 
:type start_r: int :param end_r: Chromosomal end positiont of", "determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that determines the max y position", "merge: Number of elements to be merged. If this value", "region]), from_string=True) # Get gene names and regions genes_in_region_bed =", "0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color, length_includes_head=True) plt.xlim([start, end])", "in cnvs_bed]) for interval in cnvs_bed: current_start = int(interval[1]) current_end", "color=color, verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates == \"up\"):", "plt.gca() for interval in cnvs_bed: current_start = int(interval[1]) current_end =", "str, optional :param plot_gene_names: If True, the HUGO GENE SYMBOLs", "= color_forward if(strand == \"-\"): color = color_reverse y =", "float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\" or line[:5] == \"chrom\"): continue", "patch_list = [] patch_description_list = [] met_forward = False met_reverse", "in y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i] = [[gene_start, gene_end]] break", "be plotted, defaults to None. :type vmax: float, optional :param", ":param end: Chromosomal end position of region to be plotted.", "be returned :rtype: None ''' ax = ax if ax", "side by side. If this ratio is underwent, the genes", "patch_description_list = None tick_positions = [] gene_names_clean = [] counter=0", ".0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i], horizontalalignment=\"center\", fontsize=5, color=color,", "+= [int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)): region_mid_point = region_mid_points[i] equalized_region_mid_point", "range to be plotted, defaults to None. 
:type vmax: float,", "to be plotted, defaults to None :type vmin: float, optional", "offset left += [start] height += [value] left_merged = []", "if ax is not None else plt.gca() for interval in", "True if gene names shall be plotted, False otherwise, defaults", "arcs. :param links_bed: Iterator, that contains bed-like structured lists with", "= genes_bed exons_in_region = exons_bed introns_in_region = introns_bed region_border_up =", "Determine minimal extension of barplot extension=None if(len(gene_mid_points) <= 1): extension=region[2]-region[1]", "gene_name_ens = str(e[3]) if(not gene_names_map is None): gene_names += [gene_names_map[gene_name_ens]]", "ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter(ticker.FixedFormatter( ([ \" \" for i", "that plots arcs from unequal distances of genomic segments to", "+= [np.mean(heights)] heights = [] heights += [height[i]] lefts +=", "expression_df.loc[gene_name, groups[g]] if(type(exp_values).__name__ == \"Series\"): exp_values = list(exp_values) else: exp_values", "position for gene plotting via function plotGenes. :param genes_bed: :class:`pybedtools.BedTool`", "is None): gene_name_label = gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y,", "facecolor=\"none\", edgecolor=color, linewidth=.5) ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1]) return", "regions, defaults to \"#80b1d3\". :type color_plus: str, optional :param color_minus:", "as a bar, showing the part of the genome that", "3], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [4, 4], color=color_threshold, linestyle=\"--\",", "be stacked. :type distance_ratio: float :return: Tuple of 1. 
max_y_pos:", "ax.add_patch(rect) patch_list = [] patch_description_list = [] met_forward = False", "left_merged height = height_merged plt.bar(left, height, offset, color = color,", "plot_points: If True, a point per expression value is plotted", "order, defaults to False. :type revert_coordinates: bool, optional :param rotation:", ":type plot_legend: bool :param color_g1: Color used for plotting g1", "column is a ensemble gene id, and the second column", "= ax if ax is not None else plt.gca() patches_dict", "left_border + 3*(extension/4.) tick_positions += [left_border + extension/2.] gene_names_clean +=", "+= 1 else: current_color = color rect = Rectangle([int(region[1]), -.75],", "rotation=rotation) plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates == \"up\"): plt.ylim([-.1, .8])", "float(split_line[5])-ploidy chrom = split_line[0] if(chrom == \"23\"): chrom=\"X\" elif(chrom ==", "is not None else plt.gca() genes_in_region = genes_bed exons_in_region =", "of the arc, defaults to \"k\". :type color: str, optional.", "upper: bool, optional :param loc_coordinates: Either of \"up\" | \"down\".", "if(line[0] == \"#\" or line[:5] == \"chrom\"): continue split_line =", "Plot color of copy number losses, defaults to \"r\". :type", "a CNV, defaults to 0.7. :type cnv_threshold: float, optional :param", "optional :param merge: Number of elements to be merged. If", "plotted. 
:type end: int :param bin_size: size of bin to", "start: int :param end: End position of the genomic region.", "float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name] = i y_level_dict[i] += [[gene_start, gene_end]]", "equal to 0, than merge elements will be averaged an", "100000 tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale for i in ticks", "plt.xticks([], []) plt.yticks([], []) plt.xlim([start, end]) plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed,", "bool, optional :param alpha: Alpha value for the background color", "to be plotted. :type start: int :param end: Chromosomal end", "color_neutral: Plot color of copy number neutral regions, defaults to", "defaults to None. :type max_dev: float, optional :param ax: Axis", "is None :type blacklist: list, optional :param plot_gene_ids: If True,", ":type TX_pos: int :param direction: Direction of the genomic part", "max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that creates a mapping between", "color: str, optional :param offset: Length of intervals, defaults to", "len(binned_average_meth)-1 else \"NA\") average_list = [ j for j in", "# Smooth tcn, if ploidy_dev is smaller than cnv_threshold if(abs(ploidy_dev)", "plt.ylim([-1, 1]) def plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\",", "ticks ] if(loc_coordinates == \"up\"): plt.plot([start, end], [0, 0], linestyle=\"-\",", "plots genomic coordinates in a linea fashion. :param chrom: Chromosome", "= 0 if(vmax is None): vmax = np.percentile(contact_map, 99.9) colormap", "ax def plotGenomicSegments(segments_list, chrom, start, end, ax = None): '''Function", "contains bed-like structured lists with the following elements: 1. Chromosome", "of intensity range to be plotted, defaults to None :type", "Chromosome 2. Start postion 3. End position 4. 
Value to", "ploidy=2, cnv_threshold=0.7, cmap=\"bwr\", max_dev=None, ax=None): '''Function for plotting CNV segments", "be plotted. :type chrom_r: str :param start_r: Chromosomal start position", "flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) color = None if(not colors", "if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def plotGenomicSegments(segments_list,", "bplot_g2_pos = left_border + 3*(extension/4.) tick_positions += [left_border + extension/2.]", "= int(element[1]) if(position < start or position > end): continue", "gene_names_map: dict. :param expression_df: class:`pandas.DataFrame` object containing the expression values", "y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"): \"\"\"Function for", "Nothing to be returned. :rtype: None ''' # Use given", ":param bin_size: size of bin to average methylation values, defaults", "to be plotted, defaults to None. :type vmax: float, optional", "= line.rstrip().split(\"\\t\") ploidy_dev = float(split_line[5])-ploidy chrom = split_line[0] if(chrom ==", "introns :type introns_bed: :class:`pybedtools.BedTool` :param region_bed: :class:`pybedtools.BedTool` object containing the", "max_signal): max_signal = value if(not offset is None): end =", "end, color=\"k\", ax=None): '''Function that plots methylation values as dot", "i in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id =", "direction, else if \"down\", plot ticks to lower direction, defaults", "ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions, start, end, color=\"#cbebc4\",", "r_chrom: str :param r_start: Start position of region to be", "\"top\" | \"bottom\". If location == \"top\", the pyramid points", "plotting. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned", ":class:`pybedtools.BedTool` object containing CNVs with following entries: 1. Chromosome, 2.", "extension): extension = float(current_extension) boxprops = {\"color\": \"k\", \"linewidth\": .3}", "gene_mid_points: list of integer values containing center positions of genes.", "ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], []) plt.yticks([], []) plt.xlim([start_r,", "= Rectangle((start, y-.2), end-start, .4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list", "to be used for plotting, defaults to None. :type ax:", "if(location == \"top\"): ax.set_ylim(0, (end-start)/2.) else: ax.set_ylim(-1.*(end-start)/2., 0) def distanceEqualizer(genomic_segments,", "representing the region to be plotted, defaults to 1. :type", "tick_size = 10**int((np.log10((end-start)/10))) # Determine first tick position first_tick =", "continue left_border = gene_regions[i][0] right_border = None if(i < len(gene_names)-1):", "defaults to \"bottom\". :type position_gene_names: str, optional :param log_transformed: If", "per bin # Define Colormap cmap = cm.bwr norm =", "y_level_dict[max_y_pos] = [[gene_start, gene_end]] break else: continue return max_y_pos, y_pos_dict", "linewidth=.5) elif(ploidy == 4): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\",", "= 0 for interval in genes_sorted_bed: gene_name = interval[3] gene_start", "colors must be the same as the number of groups,", "that plots bedGraph like iterators. :param chip_signals: Iterator for which", "sited to be plotted. 
:type motifs_bed: :class:`pybedtools.BedTool` :param start: Start", "Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 1), (region_mid_point, .8), (equalized_region_mid_point,", "color_forward = color_plus color_reverse = color_minus max_y_pos = None if(not", "elements with the following entries: 1. Chromosome 2. Start position", "merge=None): '''Function that plots bedGraph like iterators. :param chip_signals: Iterator", "in blacklist): continue # Define color for gene plotting strand", "'''Function that plots arcs from unequal distances of genomic segments", "containing CNVs with following entries: 1. Chromosome, 2. Start Position,", "a list-ike object containing: 1. Chromosome 2. Start postion 3.", "region1 4. Chromosome region2 5. Start region2 6. End region2", "end position of region to be plotted. :type end: int", "expression, defaults to \"#80b1d3\". :type color_g2: str, optional :param g1_id:", "gene_end]] break elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size) > distance_ratio): y_pos_dict[gene_name]", "matplotlib.patches.PathPatch(path, facecolor=facecolor, edgecolor='none') ax.add_patch(patch) ax.set_xlim(start, end) if(location == \"top\"): ax.set_ylim(0,", "None else plt.gca() TX_start = TX_pos TX_end = end_r if(direction", "plotted. :type end: int :param color: Color of points representing", "3. end position 4. Number methylated cytosines 5. Number unmethylated", "introns_in_region = introns_bed region_border_up = int(region_bed[0][1]) region_border_down = int(region_bed[0][2]) region_size", "of genomic segments to equal distances. :param genomic_segments: List of", "capprops=capprops, showfliers=False) bplot_g2 = ax.boxplot([np.log2([i if i >= 1. 
else", "ticks = [ start + end-i for i in ticks", "tick.set_size(6) for ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5,", "plotted in addition to the boxplot, no points are plotted", "object :param input_filename: Full path to ACESeq \"most_important\" file :type", "if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],", "+= [current_tick] current_tick = current_tick + tick_size scale = None", "line in gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol", "genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names = [] gene_regions = [] for", "end, segment_size, cmap=\"Greys\", vmin=None, vmax=None, location=\"top\", ax=None): '''Function that plots", "range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]], [0., .3], linestyle=\"-\", color=color,", "False. :type plot_legend: bool, optional :param colors: List of colors", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start),", "in methylation_bed: # Determine bin position = int(element[1]) if(position <", "\"left\"): TX_start = start_r TX_end = TX_pos rect = Rectangle((TX_start,", "no edge is plotted, defaults to False. :type edge_color: str,", "] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path", "default is None. :type y_max: bool, optional :param distance_ratio: Minimal", "1. for i in exp_values]) bplot = ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups),", ":class:`pybedtools.BedTool` object containing genes to be plotted. 
:type genes_bed: :class:`pybedtools.BedTool`", "1, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5])", ":param start_r: Start position of the region to be plotted.", "= [] height_merged = [] if(not merge is None): heights", "end_r: End position of the region to be plotted. :type", "Value to be plotted as bar :type chip_signals: iterator :param", "if False make more ticks. :type upper: bool, optional :param", "capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list = [] patch_description_list = [] met_forward", "triangular shape). Can be negative or greater than one. Defaults", "0. for m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1, alpha=.5)", "None): gene_names += [gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens] gene_regions +=", "color, edgecolor = color) plt.xlim(r_start, r_end) def plotMethylationProfileHeat(methylation_bed, chrom, start,", "Rectangle((start, y-.2), end-start, .4, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) patch_list =", "defaults to 0. :type merge: int, optional :return: Nothing to", "start_r, end_r, lw=1, color=\"k\", ax = None): '''Function that plots", "two genes, as ratio of ax width, such that two", "= [] for e in genomic_segments: if(int(e[1]) < start): region_mid_points", "exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist = True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops,", "ticks to upper direction, else if \"down\", plot ticks to", "expression. The number of colors must be the same as", "if(direction == \"left\"): TX_start = start_r TX_end = TX_pos rect", "the HUGO gene name :type gene_name_mapping_file: str :return: Dictionary containing", "same as the number of groups. 
:type ids: list, optional.", "\"top_down\"): codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point,", "Plot thresholds color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy == 2): plt.plot([int(start),", "gene_name in blacklist): counter += 1 continue if(counter < len(gene_names)-1):", "1. Chromsome 2. Start position 3. end position 4. Beta", "== 0.) else 0. for m in meth_calls], color=color, marker=\".\",", "[g_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) if(position_gene_names == \"top\"): ax.xaxis.set_ticks_position(\"top\")", "genomic part that is translocated. Either of \"left\" (upstream), or", "for which distances shall be equalized (each segment is of", "5. True Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome: Chromosome", "defaults to None. :type offset: int, optional :param merge: Number", "ax is not None else plt.gca() n_entries = len(meth_calls[0]) if(n_entries", "gene plot is created. :type region_bed: :class:`pybedtools.BedTool` :param blacklist: List", "of \"top\", or \"bottom\", defaults to \"bottom\". :type position_gene_names: str,", "current_tick + tick_size scale = None if(first_tick > 1000000): scale", "i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops,", "gene_name_label, size=5, color = color) gene_name = str(i[3]) gene_name_label =", "to lower direction, defaults to \"up\". :type loc_coordinates: str, optional", "color: str, optional :param direction: Direction of distance equalization (top_down", "for plotting g2 samples expression, defaults to \"#80b1d3\". 
:type color_g2:", ":param plot_gene_names: If True, the HUGO GENE SYMBOLs will be", "from matplotlib.patches import Rectangle from matplotlib.patches import Arrow from matplotlib.path", "is None): gene_names += [gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens] gene_regions", "\"2\", \"3\", \"4\"], size=6) elif(ploidy == 4): plt.ylim([0, 6.5]) plt.yticks([0,", "proportion of the arrow, defaults to 0.2 :type head_width: float,", "is \"#fb8072\". :type color_minus: str, optional. :return: Tuple of max_y_pos+1.5,", "exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False)", "genes, as ratio of ax width, such that two genes", "1]) plt.xticks([], []) plt.yticks([], []) def plotMethylationProfile(meth_calls, chrom, start, end,", "of ids must be the same as the number of", "start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points += [int(e[1])+(end-int(e[1]))/2]", "] if(loc_coordinates == \"up\"): plt.plot([start, end], [0, 0], linestyle=\"-\", color=color,", "TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function that plots a translocation event", "gene id, and the second column is the HUGO gene", "interval in cnvs_bed: current_start = int(interval[1]) current_end = int(interval[2]) ploidy_dev", "= float(signal[3]) if(value > max_signal): max_signal = value if(not offset", "if i >= 1. else 1. for i in exp_values])", "plot_legend: If True plot legend, False otherwise, defaults to False.", ":param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:", "color = tuple([ float(i)/256. 
for i in str(segment[-1]).split(\",\") ]+[1]) segment_type", "contact_map_index2 = ((end)/segment_size)+1 sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2, contact_map_index1:contact_map_index2] if(vmin is None):", "Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point,", "< region_border_up): start = region_border_up border_distance_down = region_border_down-start if(not(float(border_distance_down)/float(region_size) <", "containing: 1. Chromosome 2. Start postion 3. End position 4.", "ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False)", "ax: Axis used for plotting. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return:", "to None. :type max_dev: float, optional :param ax: Axis used", "be plotted. :type end: str :param ax: Axis used for", "not None else plt.gca() max_dist = 0 for e in", "= [] counter=0 for gene_name in gene_names: left_border = gene_mid_points[counter]-extension/2", "y_level_dict[i] = [[gene_start, gene_end]] break elif(gene_start > y_level_dict[i][-1][1] and float(gene_start-y_level_dict[i][-1][0])/float(region_size)", "g2 used for legend plotting, defaults to \"normal\". :type g2_id:", "1000000 else: digits_to_round = int(5-np.log10(tick_size)) divisor = 100000 tick_labels =", "= max_y_pos-y_pos_dict[gene_name]+0.5 rect = Rectangle((start, y-.2), end-start, .4, color=color, capstyle='butt',", "divisor = 100000 tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale for i", "to 1000. :type head_length: int, optional :param overhang: Fraction that", "as the number of groups. :type ids: list, optional. 
:param", "ax is not None else plt.gca() colors = plt.cm.get_cmap(cmap) if(max_dev", ":rtype: :class:`pybedtools.BedTool` ''' input_file = open(input_filename, \"r\") cnv_bed_list = []", "3*(extension/4.) tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name] exp_values_g1", "region to be plotted. :type chrom_r: str :param start_r: Start", "1)] path = Path(vertices, codes) path_patch = PathPatch(path, facecolor=\"none\", edgecolor=color,", "for minus stranded genes, default is \"#fb8072\". :type color_minus: str,", "Start, End, Methylated Cs, Unmethylated Cs. :type methylation_bed: :class:`pybedtools.BedTool` :param", "that the arrow is swept back (0 overhang means triangular", "\"bottom\". :type position_gene_names: str, optional :param log_transformed: If True use", "ax.add_patch(rect) plt.xlim([start, end]) plt.ylim([0, 1]) plt.xticks([], []) plt.yticks([], []) def", "return patches_dict def plotCNVs(cnvs_bed, chromosome, start, end, ploidy=2, cnv_threshold=0.7, color_gain=\"g\",", "(intensity_value/vmax if intensity_value <= vmax else 1.) facecolor = colormap(intensity_value)", "defaults to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: List of", "methylation values, defaults to 1000. :type bin_size: int, optional :param", "optional :param ax: Axes instance on which the genes are", "else plt.gca() max_dist = 0 for e in links_bed: link_pos1", "HiC contacts. 
:type contact_map: :class:`pandas.DataFrame` :param start: Chromosomal start position", "region_border_down]) plt.ylim([0, max_y_pos+1.5]) plt.yticks([], []) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5)", "[ float(m[4]) for m in m in meth_calls], color=color, marker=\".\",", "end]) plt.ylim([0.4, 0.6]) def plotHiCContactMap(contact_map, start, end, segment_size, cmap=\"Greys\", vmin=None,", "(mid_point, distance), (link_pos2, 0)] codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] path", "int(end)], [6, 6], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.xlim([int(start), int(end)]) if(ploidy ==", "the region to be plotted, defaults to 1. :type alpha:", ":type color: str, optional :param offset: Length of intervals, defaults", "= None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function that plots genomic", "facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c += 1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([],", "midpoint[1]) ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ]", "gene_name if(not gene_map is None): gene_name_label = gene_map[gene_name] y =", "linestyle='None', markersize=1, alpha=.5) plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start, end]) def", ":type region_bed: :class:`pybedtools.BedTool` :param expression_df_g1: :class:`pandas.Dataframe` containing the expression values", "bar representing the translocation, defaults to \"k\". :type color: str,", "Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point, .8),", "str(round(i/float(divisor), digits_to_round))+scale for i in ticks ] if(loc_coordinates == \"up\"):", "+= 1 continue if(counter < len(gene_names)-1): right_border = gene_regions[counter+1][0] bplot_g1_pos", "genes. 2. 
y_pos_dict: Dictionary with keys = gene ids and", "color) gene_name = str(i[3]) gene_name_label = gene_name if(not gene_map is", "defaults to \"k\". :type color: str, optional :param direction: Direction", "if not i == 0 else \"NA\") meth_after = (binned_average_meth[i+1]", "start, end, bin_size=1000, ax = None): '''Function for plotting methylation", "widths=extension/2., patch_artist = True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False)", "left = left_merged height = height_merged plt.bar(left, height, offset, color", "End position on chromosome. :type end: int :param ploidy: Assumed", "(link_pos2-link_pos1)/2 if(link_pos2 < link_pos2): mid_point = link_pos2 + (link_pos1-link_pos2)/2 vertices", "= [] n = len(binned_average_meth) for i in range(n): if(not", "is not None else plt.gca() for interval in cnvs_bed: current_start", "0 and not (i == 0)): left_merged += [lefts[0]] lefts", "showing the part of the genome that is translocated. :param", "end: int :param color: Color of the rectangles representing the", "elements with the following entries: 1. Chromsome 2. Start position", "<end>]). :type region: list :param groups: List of lists containing", "ax is not None else plt.gca() patches_dict = {} for", "continue # Define color for gene plotting strand = str(i[5])", "\"4\"], size=6) elif(ploidy == 4): plt.ylim([0, 6.5]) plt.yticks([0, 2, 4,", "region_right_border = int(region_bed[0][2]) region_left_border = int(region_bed[0][1]) # Determine minimal extension", "max_y_pos defines the \\ number of stacked genes. 2. patch_list", "widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g2 =", "of region to be plotted. :type r_chrom: str :param r_start:", "r_chrom: Chromosome of region to be plotted. 
:type r_chrom: str", "= Rectangle((current_start, tcn-.1), current_end-current_start, .2, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect)", "to the boxplot, no points are plotted otherwise, defaults to", "\"k\", \"linewidth\": .3} patch_list = None patch_description_list = None tick_positions", "vmax: float, optional :param location: Either of \"top\" | \"bottom\".", "plt.arrow(arrow_start, .5, dx, 0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False, edgecolor=\"none\", facecolor=color,", "The number of colors must be the same as the", "to ACESeq \"most_important\" file :type input_filename: str :return: :class:`pybedtools.BedTool` object", "element is a list-ike object containing: 1. Chromosome 2. Start", "Chromosome region1 2. Start region1 3. End region1 4. Chromosome", "the number of groups, defaults to None. :type colors: str,", "of the region to be plotted. :type end_r: int :param", "the bar representing the translocation, defaults to \"k\". :type color:", "defines the \\ number of stacked genes. 2. patch_list is", "fontsize=5, loc='lower left') return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region, expression_df,", "= height_merged plt.bar(left, height, offset, color = color, edgecolor =", "[0, 0] for i in range(int(((end-start)/bin_size)+1)) ] counter = 0", "end: Chromosomal end position of region to be plotted. :type", "of intervals, defaults to None. :type offset: int, optional :param", "False otherwise, defaults to False. :type plot_legend: bool, optional :param", "region to be plotted. :type r_start: int :param r_end: End", "color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5)", "map, defaults to None. 
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing", "color=color, linewidth=1) else: plt.plot([start, end], [0.3, 0.3], linestyle=\"-\", color=color, linewidth=1)", "to 0.2 :type head_width: float, optional :param head_length: Length of", "= None if(not colors is None): color = colors[g] else:", ":type start: str :param end: End position of the region", "True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops, showfliers=False) bplot_g1[\"boxes\"][0].set_facecolor(color_g1) bplot_g2[\"boxes\"][0].set_facecolor(color_g2) if(not", "color = (1,1,1,1) rect = Rectangle((segment_start, 0), segment_end-segment_start, 1, color=color)", "\"NA\" for i in binned_meth_calls ] binned_average_meth_no_missing = [] n", "y_pos_dict = {} y_level_dict = {} max_y_pos = 0 for", "genes_bed: :class:`pybedtools.BedTool` object containing genes to be plotted. :type genes_bed:", "regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names = [] gene_regions", "ytick.set_size(5) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower left') return ax def", "introns_in_region: start = int(i[1]) end = int(i[2]) gene_name = str(i[3])", "\"group \"+str(g) if(not g_id in patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list", ":param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following entries: 1.", "= 0 for region in regions: if(not edgecolor): current_color =", "= merge*offset left = left_merged height = height_merged plt.bar(left, height,", "+= [\"reverse strand\"] met_reverse = True # Plot Gene Names", "[] gene_names_clean = [] counter=0 patch_saved = False for gene_name", "binned_average_meth_no_missing = [] n = len(binned_average_meth) for i in range(n):", "Number of ids must be the same as the number", "tabix import math def plotGenes(genes_bed, exons_bed, introns_bed, 
region_bed, blacklist=None, gene_map=None,", "expression values of g1 samples (columns: sample ids; index: gene", "Get gene names and regions genes_in_region_bed = genes_bed.intersect(region_bed, wa=True, u=True).sort()", ":type start: int :param end: End position of the region", "= [ float(i[0])/(float(i[0])+float(i[1])) if (float(i[0])+float(i[1])) > 0 else \"NA\" for", "to \"r\". :type color_loss: str, optional :param color_neutral: Plot color", "Either of \"up\" | \"down\". If \"up\", plot ticks to", "plotted. :type r_end: int :param ax: Axis of plot :type", "str, optional. :param ax: Axis where the plot is drawn,", "ax=None): '''Function for plotting CNV segments as heatmap :param cnvs_bed:", "patch_description_list, fontsize=5, loc='lower left') return ax def plotGeneExpressionEqualDist(genes_bed, gene_mid_points, region,", "# Determine minimal extension of barplot extension=None if(len(gene_mid_points) <= 1):", "right_border = gene_mid_points[counter]+extension/2 if(not blacklist is None and gene_name in", "Colormap used for plotting CNVs, defaults to \"bwr\". :type cmap:", "\"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3} patch_list = None", "Start position 3. End position :type regions: iterator :param start:", "legend is plotted, False otherwise, defaults to False. :type plot_legend:", "Path to a tab separated file, for which the first", ":type expression_df_g1: :class:`pandas.DataFrame` :param expression_df_g2: :class:`pandas.Dataframe` containing the expression values", "Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c += 1", ":type alpha: float, optional. 
:param ax: Axis of plot, defaults", "1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]] break else:", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :param color: color of bars, defaults to \"b\".", "link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2 distance = abs(link_pos2-link_pos1) if(distance > max_dist): max_dist", "int(segment[2]) color = tuple([ float(i)/256. for i in str(segment[-1]).split(\",\") ]+[1])", "Names if(plot_gene_ids): for i in genes_in_region: start = int(i[1]) gene_name", "= region_border_down-start if(not(float(border_distance_down)/float(region_size) < distance_ratio)): gene_name = str(i[3]) gene_name_label =", "int :param bin_size: size of bin to average methylation values,", "than merge elements will be averaged an plotted, defaults to", "return pybedtools.BedTool(\"\\n\".join([\"\\t\".join(e) for e in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom,", "# Plot Exons for i in exons_in_region: start = int(i[1])", "counter = 0 for element in methylation_bed: # Determine bin", "\"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 patch = Rectangle((start,", "region to be plotted. :type end_r: int :param TX_pos: Position", ":param ids: IDs used for legend plotting, defaults to None.", "value in the gene plot. 
If not set, then y_max", "= None patch_description_list = None tick_positions = [] gene_names_clean =", "not None else plt.gca() binned_meth_calls = [ [0, 0] for", "if ax is not None else plt.gca() TX_start = TX_pos", "current_tick = first_tick while(current_tick <= end): ticks += [current_tick] current_tick", "in gene_names: left_border = gene_mid_points[counter]-extension/2 right_border = gene_mid_points[counter]+extension/2 if(not blacklist", "ax is not None else plt.gca() contact_map_index1 = (start)/segment_size contact_map_index2", "containing gene ids not to be plotted, defaults to None,", "i in range(len(ticks)): if(loc_coordinates == \"up\"): plt.plot([ticks[i], ticks[i]], [0., .3],", "region_bed: :class:`pybedtools.BedTool` object containing the one region, for which the", "optional :param g1_id: ID of g1 used for legend plotting,", "gene_mid_points, region, expression_df, groups, gene_names_map=None, blacklist=None, ax=None, plot_legend=False, colors=None, ids=None,", "ploidy: int, optional :param cnv_threshold: Minimal deviation from ploidy to", "plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions, start, end, color=\"#cbebc4\", edgecolor=False,", "[4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy == 4): plt.plot([int(start), int(end)],", "if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev <= -1.*cnv_threshold): color = color_loss", "in range(n): if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing += [binned_average_meth[i]] else:", "blacklist: list, optional :param plot_gene_ids: If True, all gene ids", "= int(region_bed[0][2]) region_size = region_border_down-region_border_up color_forward = color_plus color_reverse =", "containing the gene id mapping. :rtype: dictionary ''' gene_name_mapping_file =", "region to be plotted in base pairs. 
:type region_size: int", "if(not g_id in patch_description_list): patch_list += [bplot[\"boxes\"][0]] patch_description_list += [g_id]", "ax if ax is not None else plt.gca() for motif", ":rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax if ax is not", "creates a mapping between gene ids :param gene_name_mapping_file: Path to", "= [] if(direction == \"top_down\"): codes = [Path.MOVETO, Path.LINETO, Path.LINETO,", "optional :return: Dictionary with keys = names of segments, and", ":param color_plus: Color code for plus stranded genes, default is", "1. Chromosome 2. Start position 3. End position :type regions:", "given axis for plotting ax = ax if ax is", "IDs used for legend plotting, defaults to None. Number of", "i == len(binned_average_meth)-1 else \"NA\") average_list = [ j for", "== \"Series\"): exp_values_g1 = list(exp_values_g1) else: exp_values_g1 = list(exp_values_g1.iloc[0, :])", "region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for", "edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect = Rectangle((current_start, tcn-.1), current_end-current_start,", "= Rectangle((current_start, tcn-.2), current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect)", ":return: Axis on which plot was placed. 
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot` '''", "vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]+segment_size/2.),", "max_y_pos in y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i] = [[gene_start, gene_end]]", "\"most_important\" file :type input_filename: str :return: :class:`pybedtools.BedTool` object containing CNVs", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend: If True, a legend describing plus", "+= [np.mean(heights)] heights = [] offset = merge*offset left =", "None): color = colors[g] else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points):", "Plot color of copy number neutral regions, defaults to \"k\".", "points upwards, else if location == \"bottom\" the pyramid points", "int :param end: Chromosomal end position of region to be", "distanceEqualizer(genomic_segments, start, end, direction=\"top_down\", color=\"k\", ax = None): '''Function that", "is not None else plt.gca() tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper):", "= ax if ax is not None else plt.gca() tick_size", ":return: Plots axis. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' standard_colors = [\"#66c2a5\", \"#fc8d62\",", "int(motif[2]) strand = str(motif[3]) arrow_start = motif_start arrow_end = motif_end", "= genes_bed.intersect(region_bed, wa=True, u=True).sort() gene_names = [] gene_regions = []", "segment_size: Size of the segments for which contacts were called.", "regions. :type genes_bed: :class:`pybedtools.BedTool` :param gene_mid_points: list of integer values", "int :param color: Color of points representing methylation values, defaults", "is plotted, False otherwise, defaults to False. :type plot_legend: bool", "drawn on the ax. :rtype: list \"\"\" ax = ax", "ticks. 
:type upper: bool, optional :param loc_coordinates: Either of \"up\"", "start+(tick_size-start%tick_size) ticks = [] current_tick = first_tick while(current_tick <= end):", "If True use log transformed values for plotting, non-transformed values", "names of segments, and values patch :rtype: dict ''' ax", "ticks ] ticks.reverse() tick_labels.reverse() print(tick_labels) for i in range(len(ticks)): if(loc_coordinates", "is \"lower right\". :type legend_loc: str, optional :param color_plus: Color", "containing the expression values of g2 samples (columns: sample ids;", "segments_tabix_filename: Path to tabixed bed file containing (chrom, start, end,", "= Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor='none', alpha=alpha) c +=", "= gene_name if(not gene_map is None): gene_name_label = gene_map[gene_name] y", "alpha=.5) plt.ylim([0, 1]) plt.xticks([], []) plt.xlim([start, end]) def plotTX(chrom_r, start_r,", "= ax.boxplot(expression_values, positions=[bplot_pos], widths=extension/float(n_groups), patch_artist=True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops,", "color = color_reverse border_distance_down = region_border_down-start if(start < region_border_up): start", "is not None else plt.gca() # Calculate midpoints of original", "ploidy, 5. 
True Copy Number) :type cnvs_bed: :class:`pybedtools.BedTool` :param chromosome:", "= list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i", "that plots HiC contact maps as pyramid plots :param contact_map:", "float(current_extension) elif(current_extension < extension): extension = float(current_extension) boxprops = {\"color\":", "not set, then y_max is the max number of stacked", "in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(6) for ytick", "the max y position for gene plotting via function plotGenes.", "start, end, direction=\"top_down\", color=\"k\", ax = None): '''Function that plots", "greater than one. Defaults to 0. :type overhang: float, optional", "codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point, 0),", "= motif_end arrow_end = motif_start color = color_minus dx =", "regions: iterator :param start: Start position of the region to", "ax.add_patch(path_patch) ax.axis(\"off\") plt.xlim([start, end]) plt.ylim([0, 1]) return equalized_region_mid_points def plotCoordinates(chrom,", "defaults to top, :type location: str, optional :param ax: Axis", "= str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]] region_right_border", "that is translocated. Either of \"left\" (upstream), or \"right\" (downstream),", "if(int(e[1]) < start): region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points", ":type plot_points: bool, optional :param alpha: Alpha value for the", "Color of plus stranded TF regions, defaults to \"#80b1d3\". :type", "= [] height_merged += [np.mean(heights)] heights = [] heights +=", "following entries: 1. Chromosome 2. Start position 3. 
End position", "'''Function that reads CNVs from ACESeq (\"*most_important*\") files and converts", "bplot_g2[\"boxes\"][0]] patch_description_list = [g1_id, g2_id] counter += 1 ax.set_xlim(region_left_border, region_right_border)", "int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c += 1 ax.add_patch(rect) plt.xticks([],", "that reads CNVs from ACESeq (\"*most_important*\") files and converts them", "of copy number neutral regions, defaults to \"k\". :type color_neutral:", "max_dev: float, optional :param ax: Axis used for plotting, defaults", "verticalalignment=\"top\", rotation=rotation) plt.xlim([start, end]) plt.yticks([], []) if(loc_coordinates == \"up\"): plt.ylim([-.1,", ":type distance_ratio: float :return: Tuple of 1. max_y_pos: Defines the", "float(i)/256. for i in str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3]) if(segment_type", ":param start: Start position of the region to be plotted.", "= rect plt.xlim(int(start), int(end)) plt.ylim(0, 1) plt.yticks([], []) return patches_dict", "color_minus: str, optional. :return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where", "g1 used for legend plotting, defaults to \"tumor\". :type g1_id:", "[]) plt.xlim([start, end]) def plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\", color=\"k\",", "end: End position of region to be plotted. :type end:", "len(binned_average_meth) for i in range(n): if(not binned_average_meth[i] == \"NA\"): binned_average_meth_no_missing", "float(interval[4]) if(tcn < -1.*max_dev): tcn = -1.*max_dev elif(tcn > max_dev):", "ploidy_dev is smaller than cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn =", "'''Function that plots TF motifs as arrows, indicating their directionality.", "\"#80b1d3\". :type color_plus: str, optional :param color_minus: Color of plus", "Cs. :type methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome of region to", "4. 
Beta Value :type meth_calles: iterator :param chrom: Chromosome of", "if ax is not None else plt.gca() genes_in_region = genes_bed", "for plotting paired gene expression (e.g. tumor and normal) on", "edge_color: str, optional :param alpha: Alpha value of the rectangle,", "color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([int(start), int(end)]) plt.ylim([.5, 1.5]) plt.xticks([],", "the arrow, defaults to 0.2 :type head_width: float, optional :param", "end: End position on chromosome. :type end: int :param ploidy:", "+ tick_size scale = None if(first_tick > 1000000): scale =", "= list(exp_values.iloc[0, :]) expression_values = exp_values if(log_transformed): expression_values = np.log2([i", "Define midpoint of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices =", "str, optional :param color_g2: Color used for plotting g2 samples", "pybedtools import pandas as pnd import numpy as np import", "ids; index: gene ids) :type expression_df_g2: :class:`pandas.DataFrame` :param gene_names_map: Dictionary", "motif_start = int(motif[1]) motif_end = int(motif[2]) strand = str(motif[3]) arrow_start", "Or 1. Chromsome 2. Start position 3. end position 4.", "or greater than one. Defaults to 0. :type overhang: float,", ".2, color=color, capstyle='butt', linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def", "lines equalizing distances, defaults to \"k\". :type color: str, optional", "= ids[g] else: g_id = \"group \"+str(g) if(not g_id in", "Start position on chromosome. :type start: int :param end: End", "<= 1): extension=region[2]-region[1] else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small percentage", "0. :type merge: int, optional :return: Nothing to be returned.", "lists containing the IDs of the different groups. 
:type groups:", "== 4): plt.ylim([0, 6.5]) plt.yticks([0, 2, 4, 6], [\"0\", \"2\",", "must be the same as the number of groups. :type", "coordinates in a linea fashion. :param chrom: Chromosome of the", "defaults to \"Greys\". :type cmap: str, optional :param vmin: Minimal", "base pairs. :type region_size: int :param distance_ratio: Minimal distance between", "3. End position 4. Value to be plotted as bar", "''' standard_colors = [\"#66c2a5\", \"#fc8d62\", \"#8da0cb\", \"#ec87c2\", \"#a6d854\", \"#ffd92f\", \"#e5c494\",", "stacked genes. 2. patch_list is the list of patches drawn", "str, optional :param ax: Axis on which to plot contact", "(midpoint[0]-segment_size/2., midpoint[1]) ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY,", "position \\ of gene. :rtype: tuple ''' sort_indices = [int(idx)", "edgecolor=edgecolor, alpha=alpha) c += 1 ax.add_patch(rect) plt.xticks([], []) plt.yticks([], [])", "max_dev): tcn = max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold):", "ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1, color=\"k\",", "defaults to \"k\". :type color: str, optional :param ax: Axis", "else plt.gca() c = 0 for region in regions: if(not", "if not(float(m[3])+float(m[4]) == 0.) else 0. for m in meth_calls],", "current_end = int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4]) if(tcn", "Determine minimal extension of barplot extension=None for i in range(len(gene_regions)):", "which the genes are plotted, default is None. :type ax:", "gene names, for genes that should not be shown on", "End Position, 4. Deviation from ploidy, 5. 
True Copy Number)", "is the list of descriptions for the patches \\ drawn", "= [] tick_positions = [] gene_names_clean = [] counter=0 for", "import PathPatch import matplotlib.cm as cm import matplotlib import tabix", "default is None :type blacklist: list, optional :param plot_gene_ids: If", "to None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be", "region to be plotted. :type start: int :param end: End", "loc_coordinates: Either of \"up\" | \"down\". If \"up\", plot ticks", "False. :type plot_legend: bool :param color_g1: Color used for plotting", "'''Function that determines the max y position for gene plotting", "was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax if ax", "= colors[g] else: color = standard_colors[g] bplot[\"boxes\"][0].set_facecolor(color) if(plot_points): x_positions =", "0. ] binned_average_meth = binned_average_meth_no_missing # Plot average methylation values", "+= [\"forward strand\"] met_forward = True elif(strand == \"-\" and", "True elif(strand == \"-\" and not(met_reverse)): patch_list += [patch] patch_description_list", "[gene_names_map[gene_name_ens]] else: gene_names += [gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]] region_right_border", "99.9) colormap = plt.get_cmap(cmap) for i in range(contact_map_index1, contact_map_index2): y_range", "binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1]))", "of the arrow in bp (depends on the region that", "for ytick in ax.get_yticklabels(): ytick.set_size(6) if(plot_legend): ax.legend(patch_list, patch_description_list, fontsize=5, loc='lower", "float, optional :param location: Either of \"top\" | \"bottom\". If", "= [(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)] path", "int(interval[2]) for i in range(max_y_pos+1): if(i == 0 and not", "+ 1.5. 
max_y_pos defines the \\ number of stacked genes.", "blacklist: Set containing gene ids not to be plotted, default", "cnv_threshold if(abs(ploidy_dev) < cnv_threshold): tcn = ploidy color = color_neutral", "position of region to be plotted. :type r_end: int :param", "plt.text(ticks[i], .4, tick_labels[i], horizontalalignment=\"center\", verticalalignment=\"bottom\", fontsize=5, color=color, rotation=rotation) else: plt.plot([ticks[i],", "= str(i[3]) gene_name_label = gene_name if(not gene_map is None): gene_name_label", "rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5, facecolor=current_color, edgecolor=edgecolor, alpha=alpha) c", "direction: Direction of the genomic part that is translocated. Either", "plotted. :type r_chrom: str :param r_start: Start position of region", "for m in m in meth_calls], color=color, marker=\".\", linestyle='None', markersize=1,", "ploidy color = color_neutral if(ploidy_dev >= cnv_threshold): color=color_gain elif(ploidy_dev <=", "for i in str(segment[-1]).split(\",\") ]+[1]) segment_type = str(segment[3]) if(segment_type ==", "negative or greater than one. Defaults to 0. :type overhang:", "to 0.7. :type cnv_threshold: float, optional :param color_gain: Plot color", "+= [int(e[1])+(end-int(e[1]))/2] else: region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2] for i in range(len(region_mid_points)):", "lefts = [] height_merged += [np.mean(heights)] heights = [] offset", "patch_list, patch_description_list def determineYPosGene(genes_bed, region_size, distance_ratio): '''Function that determines the", "plot was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax if", "of genes. :param genes_bed: :class:`pybedtools.BedTool` object containing gene regions. :type", "expression values of all samples (columns: sample ids; index: gene", "gene expression (e.g. tumor and normal) on a gene region", "end positiont of the region to be plotted. 
:type end_r:", "segment_type = str(segment[3]) if(segment_type == \"R\"): color = (1,1,1,1) rect", "linestyle=\"--\", linewidth=.5) elif(ploidy == 4): plt.plot([int(start), int(end)], [1, 1], color=color_threshold,", "optional :param position_gene_names: Either of \"top\", or \"bottom\", defaults to", "if(loc_coordinates == \"up\"): plt.plot([start, end], [0, 0], linestyle=\"-\", color=color, linewidth=1)", "optional :return: Nothing to be returned :rtype: None ''' #", "ploidy = float(line.rstrip().split(\":\")[1]) print(ploidy) if(line[0] == \"#\" or line[:5] ==", "value of intensity range to be plotted, defaults to None.", "y_level_dict): y_pos_dict[gene_name] = i y_level_dict[i] = [[gene_start, gene_end]] break elif(gene_start", "the intensity values of HiC contacts. :type contact_map: :class:`pandas.DataFrame` :param", "the translocation. :type TX_pos: int :param direction: Direction of the", "i*equalized_region_size)- equalized_region_size/2)] region_mid_points = [] for e in genomic_segments: if(int(e[1])", "gene_names += [gene_name_ens] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2])", "second column is the HUGO gene name :type gene_name_mapping_file: str", ":type color: str, optional :param ax: Axis of plot, defaults", "start + end-i for i in ticks ] ticks.reverse() tick_labels.reverse()", "{} y_level_dict = {} max_y_pos = 0 for interval in", "= -1.*head_length plt.arrow(arrow_start, .5, dx, 0, head_width=head_width, head_length=head_length, overhang=overhang, head_starts_at_zero=False,", ":class:`pybedtools.BedTool` :param chromosome: Chromosome for which to plot CNVs. 
:type", "genes_bed: :class:`pybedtools.BedTool` object containing TX start, and TX end of", "int(end)], [4, 4], color=color_threshold, linestyle=\"--\", linewidth=.5) elif(ploidy == 4): plt.plot([int(start),", "tick_size = 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size = 10**int((np.log10((end-start)/10))) # Determine", "equal distances. :param genomic_segments: List of segments for which distances", "strand = str(motif[3]) arrow_start = motif_start arrow_end = motif_end color=color_plus", "plt.plot([ticks[i], ticks[i]], [.3, .0], linestyle=\"-\", color=color, linewidth=1) plt.text(ticks[i], -.1, tick_labels[i],", "+ (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border + extension/2.] gene_names_clean += [gene_name]", "genes_in_region_bed: gene_name_ens = str(e[3]) if(not gene_names_map is None): gene_names +=", "path = Path(vertices, codes) intensity_value = contact_map.iloc[i, j] intensity_value =", "color_minus=\"#fb8072\", ax=None): '''Function that plots TF motifs as arrows, indicating", "i in sort_indices] y_pos_dict = {} y_level_dict = {} max_y_pos", "== \"-\"): color = color_reverse y = max_y_pos-y_pos_dict[gene_name]+0.5 rect =", "plotGeneExpression(genes_bed, region_bed, expression_df_g1, expression_df_g2, gene_names_map, blacklist=None, ax=None, plot_legend=False, color_g1=\"#fb8072\", color_g2=\"#80b1d3\",", "ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for i", "plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None, color=\"b\", offset=None, merge=None): '''Function that", "entries: 1. Chromosome 2. Start position 3. 
End position :type", "in list(np.random.rand(len(expression_values))) ] plt.plot(x_positions, expression_values, \"k.\", markersize=3) g_id = None", "lefts += [left[i]] if(not i % merge == 0): left_merged", "exp_values if(log_transformed): expression_values = np.log2([i if i >= 1. else", "small percentage of region size from extension extension=extension-(region[2]-region[1])*.01 boxprops =", "def plotMotifDirections(motifs_bed, start, end, head_width=0.2, head_length=1000, overhang=0, color_plus=\"#80b1d3\", color_minus=\"#fb8072\", ax=None):", "linestyle=\"--\", linewidth=.5) plt.plot([int(start), int(end)], [5, 5], color=color_threshold, linestyle=\"--\", linewidth=.5) plt.plot([int(start),", "1. Chromosome region1 2. Start region1 3. End region1 4.", "gene_names += [gene_names_map[gene_name_ens]] gene_regions += [[int(e[1]), int(e[2])]] region_right_border = int(region_bed[0][2])", "first column is a ensemble gene id, and the second", "first tick position first_tick = start+(tick_size-start%tick_size) ticks = [] current_tick", "bottom_up), defaults to \"top_down\". :type direction: str, optional. :param ax:", "\"k\". :type color: str, optional :param ax: Axis of plot,", "= str(i[3]) if(not blacklist is None and gene_map[gene_name] in blacklist):", "optional :param color_plus: Color code for plus stranded genes, default", "Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(vertices, codes) intensity_value", ":class:`pybedtools.BedTool` :param region_size: Size of region to be plotted in", "defaults to None. :type vmax: float, optional :param location: Either", "is used to determine the color for plotting (R,G,B). :type", "left_border + (2*g+1)*extension/float((n_groups*2.)) tick_positions += [left_border + extension/2.] 
gene_names_clean +=", "cbin in range(len(binned_average_meth)): rect = Rectangle((start+cbin*bin_size, 0), bin_size, 1, color=m.to_rgba(binned_average_meth[cbin]))", "end]) def plotTX(chrom_r, start_r, end_r, TX_pos, direction=\"right\", color=\"k\", ax=None): '''Function", "be plotted as bar :type chip_signals: iterator :param r_chrom: Chromosome", "color of bars, defaults to \"b\". :type color: str, optional", "0.) else 0. for m in meth_calls], color=color, marker=\".\", linestyle='None',", "1. else 1. for i in exp_values]) bplot = ax.boxplot(expression_values,", "= contact_map.iloc[i, j] intensity_value = (intensity_value/vmax if intensity_value <= vmax", "ax=None): '''Function that plots a translocation event as a bar,", "optional :param ax: Axis used for plotting, defaults to None.", "gene_name_mapping_file: Path to a tab separated file, for which the", "heatmap :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with following entries:", "height = height_merged plt.bar(left, height, offset, color = color, edgecolor", "plots bedGraph like iterators. :param chip_signals: Iterator for which each", "introns_bed: :class:`pybedtools.BedTool` object containing introns :type introns_bed: :class:`pybedtools.BedTool` :param region_bed:", "position of region to be plotted. :type r_start: int :param", "color of copy number neutral regions, defaults to \"k\". :type", "containing CNVs from ACESeq :rtype: :class:`pybedtools.BedTool` ''' input_file = open(input_filename,", "Chromosome 2. Start position 3. End position :type regions: iterator", "edgecolor): current_color = color rect = Rectangle([int(region[1]), -.75], int(region[2])-int(region[1]), 1.5,", "gene_name_mapping_file: split_line = line.rstrip().split(\"\\t\") ensembl_gene_id = split_line[0].split(\".\")[0] hugo_gene_symbol = split_line[1].split(\".\")[0]", "= color_minus max_y_pos = None if(not len(genes_in_region) == 0): #", "False otherwise, defaults to False. 
:type plot_legend: bool :param color_g1:", "len(groups) for g in range(n_groups): bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.))", "to be merged. If this value is not equal to", "of region to be plotted in base pairs. :type region_size:", "i in genes_bed])] genes_sorted_bed = [genes_bed[i] for i in sort_indices]", "on which plot was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax =", "left_border + extension/4. bplot_g2_pos = left_border + 3*(extension/4.) tick_positions +=", "= 10**math.ceil((np.log10((end-start)/10))) if(not upper): tick_size = 10**int((np.log10((end-start)/10))) # Determine first", "\"24\"): chrom = \"Y\" cnv_bed_list += [ [chrom, split_line[1], split_line[2],", "is None): g_id = ids[g] else: g_id = \"group \"+str(g)", "> 1000000): scale = \"Mb\" else: scale=\"Kb\" digits_to_round = None", ":param direction: Direction of the genomic part that is translocated.", "+= [gene_name] exp_values_g1 = expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1", "tab separated file, for which the first column is a", ":type methylation_bed: :class:`pybedtools.BedTool` :param chrom: Chromosome of region to be", "r_chrom, r_start, r_end, ax=None, color=\"b\", offset=None, merge=None): '''Function that plots", "position of region to be plotted. :type start: int :param", "\"lower right\", \"upper left\", \"upper right\", default is \"lower right\".", "start, end, color=\"k\", ax=None): '''Function that plots methylation values as", "of the translocation. :type TX_pos: int :param direction: Direction of", "reverted to decreasing order. 
Else, coordinates stay in increasing order,", "vmin = 0 if(vmax is None): vmax = np.percentile(contact_map, 99.9)", "color_minus dx = -1.*head_length plt.arrow(arrow_start, .5, dx, 0, head_width=head_width, head_length=head_length,", "containing the expression values of g1 samples (columns: sample ids;", "region_border_up), distance_ratio) if(not y_max is None): max_y_pos = y_max #", "+ (link_pos1-link_pos2)/2 vertices = [(link_pos1, 0), (mid_point, distance), (link_pos2, 0)]", "vertices = [(region_mid_point, 0), (region_mid_point, .2), (equalized_region_mid_point, .8), (equalized_region_mid_point, 1)]", "capstyle='butt', linewidth=0) ax.add_patch(patch) if(strand == \"+\" and not(met_forward)): patch_list +=", "int(signal[2]) value = float(signal[3]) if(value > max_signal): max_signal = value", "Number methylated cytosines 5. Number unmethylated cytosines Or 1. Chromsome", "loc='lower left') return ax def plotGenomicSegments(segments_list, chrom, start, end, ax", "gene_map is None): gene_name_label = gene_map[gene_name] y = max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start,", "region_mid_points += [start+(int(e[2])-start)/2] elif(int(e[2]) > end): region_mid_points += [int(e[1])+(end-int(e[1]))/2] else:", ":type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned :rtype:", "equalized_region_mid_points[i] codes = [] vertices = [] if(direction == \"top_down\"):", "IDs, and values: HUGO GENE SYMBOLs. :type gene_names_map: dict. :param", "ax if ax is not None else plt.gca() patches_dict =", "if intensity_value <= vmax else 1.) facecolor = colormap(intensity_value) patch", "continue return max_y_pos, y_pos_dict def createGeneNameMap(gene_name_mapping_filename): '''Function that creates a", "to \"#fb8072\". 
:type color_g1: str, optional :param color_g2: Color used", "cnv_threshold: float, optional :param color_gain: Plot color of copy number", "patch_description_list = [] met_forward = False met_reverse = False #", ":class:`matplotlib.axes._subplots.AxesSubplot`, optional :return: Nothing to be returned. :rtype: None '''", "end], [0.3, 0.3], linestyle=\"-\", color=color, linewidth=1) if(revert_coordinates): ticks = [", "which plot was placed. :rtype: :class:`matplotlib.axes._subplots.AxesSubplot` ''' ax = ax", "dx = -1.*head_length plt.arrow(arrow_start, .5, dx, 0, head_width=head_width, head_length=head_length, overhang=overhang,", "of region to be plotted. :type r_start: int :param r_end:", "= max_dev color = colors((ploidy_dev+max_dev)/(2*max_dev)) if(abs(ploidy_dev) < cnv_threshold): color=colors(.5) rect", "shall be plotted, False otherwise, defaults to True. :type plot_gene_names:", ":type color_g2: str, optional :param g1_id: ID of g1 used", "not to be plotted, defaults to None, :type blacklist: set,", "plotted, default is None. :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional :param plot_legend:", "plotting CNV segments :param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with", "True use log transformed values for plotting, non-transformed values otherwise.", "position of the genomic region. :type start: int :param end:", "= [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(vertices,", "bool, optional :param legend_loc: Location of the legend. Either of", "== \"top\", the pyramid points upwards, else if location ==", "= start + offset left += [start] height += [value]", "[ start + end-i for i in ticks ] ticks.reverse()", "to \"k\". 
:type color: str, optional :param ax: Axis of", "== \"up\"): plt.plot([start, end], [0, 0], linestyle=\"-\", color=color, linewidth=1) else:", ":type vmin: float, optional :param vmax: Maximal value of intensity", "[np.mean(heights)] heights = [] offset = merge*offset left = left_merged", "loc_coordinates: str, optional :param revert_coordinates: If True, coordinates are reverted", "= None if(first_tick > 1000000): scale = \"Mb\" else: scale=\"Kb\"", "colors :param segments_tabix_filename: Path to tabixed bed file containing (chrom,", "region to be plotted. :type end: str :param ax: Axis", "gene_map=None, plot_gene_ids=True, y_max=None, distance_ratio=0.1, ax=None, plot_legend=False, legend_loc=\"lower right\", color_plus=\"#80b1d3\", color_minus=\"#fb8072\"):", "is not equal to 0, than merge elements will be", "end, color=\"k\", ax = None, upper=True, loc_coordinates=\"up\", revert_coordinates=False, rotation=0): '''Function", "two genes are plotted side by side. If this ratio", "[g1_id, g2_id] counter += 1 ax.set_xlim(region_left_border, region_right_border) ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions))) ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean))) if(not", "y, gene_name_label, size=5, color = color) gene_name = str(i[3]) gene_name_label", "the second column is the HUGO gene name :type gene_name_mapping_file:", "plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1, color=\"k\", ax = None): '''Function", "# Define Colormap cmap = cm.bwr norm = matplotlib.colors.Normalize(vmin=0., vmax=1.)", "[]) if(plot_legend): plt.legend(patch_list, patch_description_list, loc=legend_loc, fontsize=5) return max_y_pos+1.5, patch_list, patch_description_list", "exons_in_region = exons_bed introns_in_region = introns_bed region_border_up = int(region_bed[0][1]) region_border_down", "plots :param contact_map: Matrix that contains the intensity values of", "to be returned. 
:rtype: None ''' ax = ax if", "merge is None): heights = [] lefts = [] for", "which contacts were called. :type segment_size: int :param cmap: Name", "y_pos_dict: Dictionary with keys = gene ids and values =", "color=colors(.5) rect = Rectangle((current_start, .5), current_end-current_start, 1, color=color, edgecolor='none', capstyle='butt',", "in range(contact_map_index1, contact_map_index2): y_range = (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location ==", "is the HUGO gene name :type gene_name_mapping_file: str :return: Dictionary", "plt.xticks([], []) plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function that reads CNVs", "color_minus: Color code for minus stranded genes, default is \"#fb8072\".", "plus stranded TF regions, defaults to \"#80b1d3\". :type color_plus: str,", "max_signal = 0 left = [] height = [] for", "plt.gca() patches_dict = {} for segment in segments_list: segment_start =", "lw = lw) ax.add_patch(patch) #ax.spines[\"bottom\"].set_visible(False) ax.spines[\"top\"].set_visible(False) ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) plt.xticks([], [])", "segments for which distances shall be equalized (each segment is", "[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(vertices, codes)", "position of the region to be plotted. :type start: int", "\"top\", the pyramid points upwards, else if location == \"bottom\"", ":type links_bed: iterator :param chrom_r: Chromosome of the region to", "color = None if(not colors is None): color = colors[g]", "else: codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO] vertices = [(region_mid_point,", "= TX_pos rect = Rectangle((TX_start, .4), TX_end-TX_start, .2, color=color, capstyle='butt',", "Chromosomal start position of the region to be plotted. 
:type", "float(current_extension) boxprops = {\"color\": \"k\", \"linewidth\": .3} flierprops = {\"color\":", "End position of the region to be plotted. :type end_r:", "e in genes_in_region_bed: gene_name_ens = str(e[3]) gene_names += [gene_names_map[gene_name_ens]] gene_regions", "str :param start: Start position of region to be plotted.", "stacked, default is 0.1. :type distance_ratio: float, optional :param ax:", "motifs_bed: :class:`pybedtools.BedTool` :param start: Start position of the region to", "[gene_name] exp_values_g1 = expression_df_g1.loc[gene_name, :] if(type(exp_values_g1).__name__ == \"Series\"): exp_values_g1 =", "norm = matplotlib.colors.Normalize(vmin=0., vmax=1.) m = matplotlib.cm.ScalarMappable(norm = norm, cmap", "= \"group \"+str(g) if(not g_id in patch_description_list): patch_list += [bplot[\"boxes\"][0]]", "in cnv_bed_list]), from_string=True) def plotChIPSignals(chip_signals, r_chrom, r_start, r_end, ax=None, color=\"b\",", "= link_pos2 + (link_pos1-link_pos2)/2 vertices = [(link_pos1, 0), (mid_point, distance),", "Set containing gene ids not to be plotted, default to", "+= [start] height += [value] left_merged = [] height_merged =", "to be returned. :rtype: None ''' # Use given axis", "i % merge == 0): left_merged += [lefts[0]] lefts =", "exp_values = list(exp_values) else: exp_values = list(exp_values.iloc[0, :]) expression_values =", "m in meth_calls ], [ float(m[3])/(float(m[3])+float(m[4])) if not(float(m[3])+float(m[4]) == 0.)", ":param genes_bed: :class:`pybedtools.BedTool` object containing genes to be plotted. :type", "+= 1 y_pos_dict[gene_name] = max_y_pos y_level_dict[max_y_pos] = [[gene_start, gene_end]] break", "of rectangle midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) vertices = [(midpoint[0]-segment_size/2., midpoint[1]),", "plotted. :type r_start: int :param r_end: End position of region", "Chromsome 2. Start position 3. end position 4. 
Number methylated", "containing the region to be plotted :type region_bed: :class:`pybedtools.BedTool` :param", "else plt.gca() n_entries = len(meth_calls[0]) if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2.", "= (range(contact_map_index1+(i-contact_map_index1), contact_map_index2) if location == \"top\" else range(contact_map_index1, contact_map_index2-(contact_map_index2-i)))", "to \"bwr\". :type cmap: str, optional :param max_dev: Maximal deviation", "plotting paired gene expression (e.g. tumor and normal) on a", "float(len(average_list))) if len(average_list) > 0 else 0. ] binned_average_meth =", "that plots TF motifs as arrows, indicating their directionality. :param", "else: extension=gene_mid_points[1]-gene_mid_points[0] # Subtract a small percentage of region size", "current_end-current_start, .4, color=color, edgecolor='none', capstyle='butt', linewidth=0) ax.add_patch(rect) else: rect =", "of the arrow, defaults to 0.2 :type head_width: float, optional", "Start Position, 3. End Position, 4. Deviation from ploidy, 5.", "= str(i[5]) color = color_forward if(strand == \"-\"): color =", "= 0 for element in methylation_bed: # Determine bin position", "linewidth=0) ax.add_patch(rect) plt.xlim([start_r, end_r]) plt.ylim([0.3, 0.7]) def plotRegions(regions, start, end,", "4. Number methylated cytosines 5. Number unmethylated cytosines Or 1.", "list(exp_values_g2) else: exp_values_g2 = list(exp_values_g2.iloc[0, :]) bplot_g1 = ax.boxplot([np.log2([i if", "\"NA\") meth_after = (binned_average_meth[i+1] if not i == len(binned_average_meth)-1 else", "n_entries = len(meth_calls[0]) if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2. 
for m", "ax.spines[\"left\"].set_visible(False) ax.spines[\"right\"].set_visible(False) def plotLinksAsArcs(links_bed, chrom_r, start_r, end_r, lw=1, color=\"k\", ax", "defaults to None, :type blacklist: set, optional :param ax: (default:", "that plots a translocation event as a bar, showing the", "# Use given axis for plotting ax = ax if", ":param region_bed: :class:`pybedtools.BedTool` object containing the region to be plotted", "1.5]) plt.xticks([], []) plt.yticks([], []) def readACESeqAsBed(input_filename): '''Function that reads", "norm, cmap = cmap) for cbin in range(len(binned_average_meth)): rect =", "+= 1 binned_meth_calls[current_bin][0] += n_meth binned_meth_calls[current_bin][1] += n_unmeth binned_average_meth =", "== \"23\"): chrom=\"X\" elif(chrom == \"24\"): chrom = \"Y\" cnv_bed_list", "1) plt.yticks([], []) return patches_dict def plotCNVs(cnvs_bed, chromosome, start, end,", "'''Function that plots methylation values as dot plots. :param meth_calls:", "0.5) if(ploidy == 2): plt.plot([int(start), int(end)], [1, 1], color=color_threshold, linestyle=\"--\",", "end: str :param ax: Axis used for plotting, defaults to", "normal) on a gene region scale retaining the position of", "is plotted), defaults to 1000. :type head_length: int, optional :param", "plt.gca() n_entries = len(meth_calls[0]) if(n_entries == 5): plt.plot([ (float(m[1])+float(m[2]))/2. for", "means triangular shape). Can be negative or greater than one.", "str, optional :param ids: IDs used for legend plotting, defaults", "= (binned_average_meth[i-1] if not i == 0 else \"NA\") meth_after", "= [Path.MOVETO, Path.CURVE3, Path.CURVE3] path = Path(vertices, codes) patch =", "ids is None): g_id = ids[g] else: g_id = \"group", "whiskerprops = {\"color\": \"k\", \"linewidth\": .3} capprops={\"color\": \"k\", \"linewidth\": .3}", "\"linewidth\": .3} whiskerprops = {\"color\": \"k\", \"linewidth\": .3} capprops={\"color\": \"k\",", "str :param end: End position of the region to be", "position 4. 
Value to be plotted as bar :type chip_signals:", "methylation values as dot plots. :param meth_calls: Iterator containing list-like", "head_length: int, optional :param overhang: Fraction that the arrow is", "1. else 1. for i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True,", "1. for i in exp_values_g1])], positions=[bplot_g1_pos], widths=extension/2., patch_artist=True, boxprops=boxprops, flierprops=flierprops,", "the rectangle, representing the region to be plotted, defaults to", "if(not plot_gene_names): ax.xaxis.set_major_formatter( ticker.FixedFormatter(([ \" \" for i in gene_names_clean])))", "links_bed: iterator :param chrom_r: Chromosome of the region to be", "if(not colors is None): color = colors[g] else: color =", "of descriptions for the patches \\ drawn on the ax.", "for plotting g1 samples expression, defaults to \"#fb8072\". :type color_g1:", "of coordinate strings, defaults to 0. :type rotation: int, optional", "= \"Y\" cnv_bed_list += [ [chrom, split_line[1], split_line[2], str(ploidy_dev), split_line[5],", "in gene_names_clean]))) for tick in ax.get_xticklabels(): tick.set_rotation(45) tick.set_size(5) for ytick", "linewidth=0) ax.add_patch(rect) # Plot thresholds color_threshold=(189./255., 189./255., 189./255., 0.5) if(ploidy", "int(interval[2]) ploidy_dev = float(interval[3]) tcn = float(interval[4]) # Smooth tcn,", "left_merged += [lefts[0]] lefts = [] height_merged += [np.mean(heights)] heights", "iterator :param r_chrom: Chromosome of region to be plotted. :type", "= (i*segment_size+(j*segment_size-i*segment_size)/2., (j*segment_size-i*segment_size)/2.) 
vertices = [(midpoint[0]-segment_size/2., midpoint[1]), (midpoint[0], midpoint[1]-segment_size/2.), (midpoint[0]+segment_size/2.,", "bool, optional :param colors: List of colors used for plotting", ":class:`pybedtools.BedTool` :param introns_bed: :class:`pybedtools.BedTool` object containing introns :type introns_bed: :class:`pybedtools.BedTool`", "in exp_values_g2])], positions=[bplot_g2_pos], widths=extension/2., patch_artist = True, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops,", "downwards, defaults to top, :type location: str, optional :param ax:", "= max_y_pos-y_pos_dict[gene_name]+.8 plt.text(start, y, gene_name_label, size=5, color = color) plt.xlim([region_border_up,", ":param ax: Axes instance on which the genes are plotted,", "+ offset left += [start] height += [value] left_merged =", "= ax if ax is not None else plt.gca() binned_meth_calls", "optional :return: Nothing to be returned :rtype: None ''' ax", "< distance_ratio)): gene_name = str(i[3]) gene_name_label = gene_name if(not gene_map", "defaults to 0.7. :type cnv_threshold: float, optional :param cmap: Colormap" ]
[ "operator_stack[-1] not in PARENTHE and compare(char) <= 0 ): binary_operate()", "string is liable to be much more complicated than necessary;", "and which states/symbols are the source of the problem. \"\"\"", "'€' if len(regex) == 1: return regex + '*' return", "machine_stack: List[NFA] = [] operator_stack = ['sentinel'] def binary_operate() ->", "is not the case that every pair of a state", "set(self.states), gnfa_start, gnfa_accept) def _good_range(self) -> None: transition_range = set(self.transition_function.values())", "the power-set of the nfa's state set --- i.e., the", "add_empty_transitions(self, other) new_self = NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other", "Regex) -> int: return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex", "value in self.transition_function.items() } return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states )", "def binary_operate() -> None: right_operand = machine_stack.pop() left_operand = machine_stack.pop()", "of th following are true: * the start state is", "== 'Ø': return 'Ø' if regex1 == '€': return regex2", "start_state=self.start_state, accept_states=self.accept_states ) new_other = NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states )", "regex string that generates A. That regex string is liable", "symbol in the transition function. Note that the empty string", "Symbol) -> NFA: tf: MutableNfaTF = { pair: set() for", "self.start_state = start_state self.accept_state = accept_state self.states = ( self.body_states", "NFA(_FSA): \"\"\" A nondeterministic finite automaton class. 
Takes three keyword", "= ['(', ')'] EMPTIES = ['€', 'Ø'] NOT_SYMBOLS = OPERATORS", "raise ValueError( \"Left parenthesis occurs in regex without matching right", "implicit, as is usual; no need to write '•'' explicitly", "be the language recognised by dfa2. `dfa1 + dfa2` returns", "= set() if empty == 'Ø' else {'q1'} return NFA(", "_error_message( bad_set=bad_range, message_singular=(\"Value {} in the range of the transition", "for symbol in union_alphabet: new_tf[(error_state, symbol)] = error_state for symbol", "\"\"\" # powerset code an itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes", "sym), frozenset()) empty: FrozenSet[State] = frozenset() # This avoids a", "-> None: right_operand = machine_stack.pop() left_operand = machine_stack.pop() machine =", "> 0: raise ValueError( \"Left parenthesis occurs in regex without", "= ( self.body_states | {self.start_state} | {self.accept_state} ) def reduce(self)", "NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod def fit( regex: Regex,", "= accept_state self.states = ( self.body_states | {self.start_state} | {self.accept_state}", "symbols that aren't in the nfa's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet)", "range of the transition function is not a subset of", "- self.states _error_message( bad_set=bad_range, message_singular=(\"State {} in the range of", "== set() def determinize(self) -> \"DFA\": \"\"\"Returns a DFA that", "into NFAs, uses the NFA '+', then converts the result", "new_other, union_tf = self._combine(other) union_start_state = _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state,", "for item in chain.from_iterable( combinations(s, r) for r in range(len(s)+1)", "of states inferred from the transition function; 3. 
a member", "Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range - self.states, message_singular=(\"State", "the alphabet of the NFA. (My apologies to speakers of", "dfa2 are ordered pairs of states from dfa1 and dfa2.", "self.accept_states | {star_start} return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod", "'€': return regex1 if union_main_scope(regex1): regex1 = f'({regex1})' if union_main_scope(regex2):", "name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) -> None: bad_accept_states = self.accept_states", "= self.transition_function[(state1, state2)] new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4", "missing cases -- i.e., it is not the case that", "r in range(len(s)+1) ) } state_sets = powerset(self.states) determinized_tf =", "the range of the transition \" \"function are not in", "(Quick challenge: it's not totally obvious how to match the", "I will explain presently. 
As of now, the syntax of", "-> NFA: tf: MutableNfaTF = { pair: set() for pair", "by dfa1, and B be the language recognized by dfa2.", "else: concat_tf[(state, '')] = {new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states", "Regex) -> bool: paren_count = 0 for char in regex:", "FrozenSet, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple, Union, cast", "an nfa that recognizes A* -- i.e., the set of", "_error_message( bad_set=bad_range, message_singular=(\"State {} in the range of the transition", "== ')': paren_count -= 1 elif char == '|': if", "not the case that every pair of a state and", "self._well_defined() @property def alphabet(self) -> FrozenSet[Symbol]: return self._alphabet @property def", "return regex1 return f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2 = self.transition_function[(rip,", "message_singular=(\"State {} in the range of the transition \" \"function", "syntax of the regular expressions that this method takes as", "states, and the second elements are the symbols in the", "dfa2. `dfa1 | dfa2` returns a dfa that recognizes A", "NFA: copy_tf = {} for state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state),", "NFA, nfa2: NFA) -> NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols =", "is missing a case -- i.e., it is not the", "NFA: tf: NfaTransitionFunction = { pair: set() for pair in", "otherwise. Will raise a ValueError exception is the string contains", "char in OPERATORS and processed[-1] in {'|', '•'}: raise ValueError(", "union_transition_function[(state1 + state2, symbol)] = ( self_tf[(state1, symbol)] + other_tf[(state2,", "state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set) def _transition(self, state_set: AbstractSet[State], symbol:", "\"\"\" Output a GNFA equivalent to `self` with one less", "but, sadly, computationally expensive algorith. 
For that reason, I recommend", "union_alphabet: new_tf[(error_state, symbol)] = error_state for symbol in extra_symbols: for", "state2)] = symbol gnfa_start = _get_new_state(self.states) gnfa_accept = _get_new_state(self.states |", "number of members of A. \"\"\" star_start = _get_new_state(self.states) star_tf", "the range of the transition \" \"function is not in", "transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap = self.states & other.states while", "the domain of the transition function. The exception message will", "union_start_state = self.start_state + other.start_state union_accept_states = { _stringify(item) for", "There is no problem with the input NFAs having different", "that regular expression and that alphabet. The alphabet parameter is", "states. \"\"\" return (self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self) -> _GNFA:", "Note that the empty string will not be inferred to", "not a member of the set of states inferred from", ") if char in OPERATORS and processed[-1] in {'|', '•'}:", "import collections.abc from itertools import product, chain, combinations from string", "-> Regex: if regex1 == \"Ø\": return regex2 if regex2", "nfa2: NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2:", "\"DFA\": \"\"\" Let A be the language recognised by dfa1,", "state_set = state_set | epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol=''", "inferred to be a member of the alphabet (and hence", "The empty set is a valid value; in fact, you", "the fsa's \" \"state set.\"), message_plural=(\"Accept states {} are not", "will change the symbol for empty-set.) 
In the absence of", "None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) -> None:", "one-character string; * the transition function is missing a case", "message_plural=\"Alphabet cannot contain characters {}.\" ) def fit_empty(empty: Regex) ->", "regex: if char == '(': paren_count += 1 elif char", "will raise a ValueError exception on instantiation if any of", "= error_state return new_states, new_tf self_states, self_tf = maybe_add_state(self, other)", "set() for pair in product({'q1', 'q2'}, alphabet) } tf[('q1', symbol)]", "modification to make the return a set of frozensets). def", "parentheses, vertical bar and star mean what you'd expect them", "{}.\" ) def fit_empty(empty: Regex) -> NFA: tf: NfaTransitionFunction =", "= self.accept_states | {star_start} return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts )", "concatenating any number of members of A. \"\"\" star_start =", "not a set; 5. the range of the transition function", "overlap: other = copy(other) overlap = self.states & other.states def", "uses the NFA '+', then converts the result back to", "Optional, Set, Tuple, Union, cast ) from .base import (", "alphabet. The domain of the transition function is the power-set", "set of nfa1 plus the cardinality of the state-set of", "to `self` with one less state in it. 
\"\"\" def", "in state_set: counter += 1 new_state = \"new_state\" + str(counter)", "empty set is a valid value; in fact, you are", "dfa1.states extra_symbols = dfa2.alphabet - dfa1.alphabet if extra_symbols: error_state =", "not quite right -- the default value is string.printable *minus*", "def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set,", "the alphabet contains any of the verboten characters -- i.e.,`(`", "in self.states - {self.accept_state, rip}: r1 = self.transition_function[(state1, rip)] for", "in the fsa's state set.\") ) def _get_successors( self, *,", "nd_transition_function = { key: {value} for key, value in self.transition_function.items()", "self, transition_function: GnfaTransitionFunction, body_states: Set[State], start_state: State, accept_state: State ):", "and 'Ø'. The parentheses, vertical bar and star mean what", "FrozenSet[State] = frozenset() # This avoids a mypy bug. return", "true: * the start state is not a member of", "-> Regex: if regex1 == 'Ø' or regex2 == 'Ø':", "exponential in the number of states of the NFA. Don't", "self.body_states - {rip}, self.start_state, self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State, Symbol],", "the power set of states inferred from the transition function;", "paren_count == 0: return True return False def regex_star(regex: Regex)", "alphabet. 
The class will raise a ValueError exception on instantiation", "return self._alphabet @property def accept_states(self) -> AbstractSet[State]: return self._accept_states def", "File containing DFA and NFA public classes \"\"\" import collections.abc", "dfa1: DFA, dfa2: DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf =", ") _error_message( bad_set=transition_range - self.states, message_singular=(\"State {} in the range", "new_states, new_tf self_states, self_tf = maybe_add_state(self, other) other_states, other_tf =", "specify which of these six conditions things triggered the exception,", "= _get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start, '')] = {self.start_state} for", "the input regex contain a binary operator followed by an", "from dfa1 and dfa2. There is no problem with the", "symbol in extra_symbols: for state in dfa1.states: new_tf[(state, symbol)] =", "overlap = self.states & other.states while overlap: other = copy(other)", "State - `accept_state`: AbstractSet[State] (where States are strings and Symbols", "exception is the string contains symbols that aren't in the", "English chauvinism, but your letter is so very close to", "extra_symbols: error_state = _get_new_state(dfa1.states) new_states = dfa1.states | {error_state} for", "dfa1.transition_function new_states = dfa1.states extra_symbols = dfa2.alphabet - dfa1.alphabet if", "is no problem with the input DFAs having different alphabets.", "by an \" \"operator; not cool.\" ) if char ==", "to speakers of Scandinavian languages for the last one; I", "return regex + '*' return f\"({regex})*\" def regex_concat(regex1: Regex, regex2:", "<filename>toc/fsa/fsa.py<gh_stars>0 \"\"\" File containing DFA and NFA public classes \"\"\"", "member of the set of states inferred from the transition", "copy(other) overlap = self.states & other.states def add_empty_transitions( nfa1: NFA,", "are not members of the fsa's \" \"state set.\") )", "0: processed += ( '•' if 
processed[-1] not in {'(',", "of a state and a symbol is in the domain", "empty-set symbol. If, by some miracle, there is someone who", "ValueError( \"Right parenthesis occurs in regex withour matching \" \"left", "is not a one-character string; 4. a member of the", "nfa1 | nfa2 is the cardinality of the state set", "and digits, and most common punctuation and white space. Actually,", "return '€' if len(regex) == 1: return regex + '*'", "string will not be inferred to be a member of", "state set.\") ) def accepts(self, string: str) -> bool: \"\"\"", "\"\"\" Takes a regular expression and an alphabet (i.e., a", "self) state_pairs = product(self_states, other_states) union_transition_function = {} for (state1,", "= _get_new_state(dfa1.states) new_states = dfa1.states | {error_state} for symbol in", "= {} for (state1, state2), symbol in product(state_pairs, union_alphabet): union_transition_function[(state1", "recognized by nfa2. `nfa1 + nfa2` returns an nfa that", "an alphabet symbol in the transition function. Note that the", "get_successor(state: State, sym: Symbol) -> AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction,", "Set[State]] class NFA(_FSA): \"\"\" A nondeterministic finite automaton class. Takes", "self.alphabet: star_tf[(star_start, symbol)] = set() for state in self.accept_states: star_tf[(state,", "regex1 + regex2 def regex_union(regex1: Regex, regex2: Regex) -> Regex:", "that recognizes the same same language as the NFA instance.", "nfa's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for symbol", "state set of nfa1 plus the cardinality of the state-set", "occurs in regex withour matching \" \"left parenthesis.\" ) processed", "letters and digits, and most common punctuation and white space.", "large numbers of states. 
\"\"\" return (self.non_determinize() + other.non_determinize()).determinize() def", "machine_stack[-1] = machine_stack[-1].star() elif char in OPERATORS: if operator_stack[-1] in", "return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex, alphabet)", "chain, combinations from string import printable from typing import (", "message will specify which of these above conditions things triggered", "True return False def regex_star(regex: Regex) -> Regex: if regex", "not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() ) _error_message(", "'|' + symbol else: gnfa_tf[(state1, state2)] = symbol gnfa_start =", "'•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot contain", "B. Note that this `+` operation is not commutative. \"\"\"", "of DFA states has the cardinality of the power-set of", "{} determinized_accept = set() for (state_set, symbol) in product(state_sets, self._alphabet):", "not a set.\"), message_plural=(\"Values {} in the range of the", ") def _get_successors( self, *, state_set: AbstractSet[State], symbol: Symbol )", "= set() union_accept_states = new_self.accept_states | new_other.accept_states return NFA( transition_function=union_tf,", "\"\"\" A deterministic finite automaton class. 
Takes three keyword arguments:", "FrozenSet[Symbol]: return self._alphabet @property def accept_states(self) -> AbstractSet[State]: return self._accept_states", "of the tuples represent the nfa's states, and the second", "(state1, state2), symbol in product(state_pairs, union_alphabet): union_transition_function[(state1 + state2, symbol)]", "symbol)) for state in state_set] ) def _add_epsilons(self, state_set: AbstractSet[State])", "i.e., the language consisting of the set of strings of", "Symbol], Union[State, AbstractSet[State]] ] class _FSA(_Base): def __init__( self, *,", "== '€': return regex1 if union_main_scope(regex1): regex1 = f'({regex1})' if", "that this method takes as input is very simple --", "paren_count = 0 for char in regex: if char in", ") -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function new_states = dfa1.states", "be much more complicated than necessary; maybe I'll figure out", "processed[-1] in {'|', '•'}: raise ValueError( \"Regex contains binary operator", "empty set, if it is. You can define epsilon-moves by", "= { _stringify(item) for item in ( set(product(self.accept_states, other_states)) |", ") def accepts(self, string: str) -> bool: \"\"\" `my_dfa.accepts(\"some string\")`", "char == ')': paren_count -= 1 if paren_count < 0:", "* the set of accept states is not a subset", "looks like an epsilon); there's no other way to match,", "transition function; 6. the transition function is missing cases --", "= '€' for state in self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€'", "regex_star(r2)), r3), r4 ) reduced_tf[(state1, state2)] = new_regex return _GNFA(", "input is very simple -- much simpler than the standard", "function; 6. the transition function is missing cases -- i.e.,", "is not a member of the set of states inferred", "strings and Symbols are one-char strings). 
The keys of the", "reasons, the time complexity of this method is exponential in", "= Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]] ] class _FSA(_Base): def", "these six conditions things triggered the exception, and which states/symbols", "A deterministic finite automaton class. Takes three keyword arguments: -", "is the string contains symbols that aren't in the nfa's", "a version of Dijkstra's shunting yard algorithm to parse the", "transition_range - self.states _error_message( bad_set=bad_range, message_singular=(\"State {} in the range", "\"\"\" _check_input(string=string, alphabet=self.alphabet) current_state = self.start_state for symbol in string:", "*, state_set: AbstractSet[State], symbol: Symbol ) -> FrozenSet[State]: def get_successor(state:", "as input; returns an NFA that recognises the language defined", "dfa that recognizes A union B. The states of dfa1", "0: return True return False def regex_star(regex: Regex) -> Regex:", "are strings, and Symbols are one-char strings.) The transition function'", "nfa2.alphabet - nfa1.alphabet if extra_symbols: for pair in product(nfa1.states, extra_symbols):", "fsa's \" \"state set.\"), message_plural=(\"Accept states {} are not members", "required to specify that the successor set for a given", "return new_self, new_other, combination_tf def _good_range(self) -> None: bad_range =", "powerset(iterable: Iterable) -> Set[FrozenSet]: s = list(iterable) return { frozenset(item)", "regex contain a binary operator followed by an operator, or", "state in self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€' for state1, state2", "string.printable -- i.e., the set of \"printable\" characters, which includes", "function; 3. a member of the alphabet inferred from the", "in the alphabet of the NFA. 
(My apologies to speakers", "Set[FrozenSet]: s = list(iterable) return { frozenset(item) for item in", "] class _FSA(_Base): def __init__( self, *, transition_function: FsaTransitionFunction, start_state:", "MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA): \"\"\" A nondeterministic", "a DFA instance and returns an NFA instance. \"\"\" nd_transition_function", "any of the following are true: 1. the start state", "the star symbol, and the tilde, for reasons that I", "\"state set.\") ) def _good_range(self): raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State,", "Symbols are one-char strings). The keys of the `transition_function` implicitly", "not a subset of the set of states inferred from", "regex + '*' return f\"({regex})*\" def regex_concat(regex1: Regex, regex2: Regex)", "the NFA. The method will raise a ValueError exception if", "the fsa's state set.\") ) def _get_successors( self, *, state_set:", "of nfa1 | nfa2 is the cardinality of the state", "other: \"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state: State): return", "The keys of the `transition_function` implicitly define the dfa's state-set", "function; * the set of accept states is not a", "define epsilon-moves by using the empty string in place of", "set() union_accept_states = new_self.accept_states | new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state,", "will not be inferred to be a member of the", "product, chain, combinations from string import printable from typing import", "the NFA '+', then converts the result back to a", "paren_count = 0 for char in regex: if char ==", "= 'new_state1' while new_state in state_set: counter += 1 new_state", "with it. For reaons related to the above, the characters", "`|`, `*`, `•`, `€` and `Ø`, 2. the input regex", "the input regex string contains a character not in the", "one less state in it. 
\"\"\" def union_main_scope(regex: Regex) ->", "value is string.printable -- i.e., the set of \"printable\" characters,", "state_pairs = product(self_states, other_states) union_transition_function = {} for (state1, state2),", "symbol='' ) return frozenset(state_set) def _transition(self, state_set: AbstractSet[State], symbol: Symbol):", "self._add_epsilons({self.start_state}) for symbol in string: current_states = self._transition(current_states, symbol) return", "concatenation. You can leave concatentation implicit, as is usual; no", "def _good_range(self): raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State], Regex] MutableGnfaTF", "you'd expect them to mean if you are familiar with", "to be a member of the alphabet (and hence the", "maybe I'll figure out how to improve on average simplicity,", "and dfa2. There is no problem with the input DFAs", "Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction: new_tf", "symbol else: gnfa_tf[(state1, state2)] = symbol gnfa_start = _get_new_state(self.states) gnfa_accept", "I am very against English chauvinism, but your letter is", "dfa2` returns a DFA that recognises the set of all", "def _combine(self, other: \"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state:", "sadly, computationally expensive algorith. For that reason, I recommend you", "place of an alphabet symbol in the transition function. Note", "input NFAs having different alphabets. \"\"\" new_self, new_other, union_tf =", "the `transition_function` implicitly define the dfa's state-set and alphabet. 
The", "- `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`: State - `accept_states`:", "Regex = str FsaTransitionFunction = Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]]", "symbol)] } copy_start = prime(nfa.start_state) copy_accept = {prime(x) for x", "symbol)] = error_state for symbol in extra_symbols: for state in", "while new_state in state_set: counter += 1 new_state = \"new_state\"", "in new_self.accept_states: if (state, '') in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else:", "{ x for x in self.transition_function.values() if not isinstance(x, collections.abc.Set)", "DFAs into NFAs, uses the NFA '+', then converts the", "above veboten characters, 3. the input regex contain a binary", "== '€': return regex2 if regex2 == '€': return regex1", "dfa2.alphabet - dfa1.alphabet if extra_symbols: error_state = _get_new_state(dfa1.states) new_states =", "other: \"NFA\") -> \"NFA\": \"\"\" Let A be the language", "alphabet symbol in the transition function. 
Note that the empty", "'*': machine_stack[-1] = machine_stack[-1].star() elif char in OPERATORS: if operator_stack[-1]", "state set.\"), message_plural=(\"States {} in the range of the transition", "-> FrozenSet[State]: def get_successor(state: State, sym: Symbol) -> AbstractSet[State]: self._transition_function", "has the cardinality of the power-set of the set of", "_check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for symbol in string: current_states", "if regex in EMPTIES: return '€' if len(regex) == 1:", "the set of states inferred from the transition function; 2.", "paren_count < 0: raise ValueError( \"Right parenthesis occurs in regex", "contain a binary operator followed by an operator, or 4.", "str) -> bool: \"\"\" Determines whether nfa accepts input string.", "(minor modification to make the return a set of frozensets).", "= ( set(printable) - {'(', ')', '|', '*'} ) )", "tf[('q1', symbol)] = {'q2'} return NFA( transition_function=tf, start_state='q1', accept_states={'q2'} )", "NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other = NFA( transition_function=other_tf, start_state=other.start_state,", "are familiar with regular expressions. 
'•' (option-8 on a mac", "expect them to mean if you are familiar with regular", "self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set, symbol)", "0: raise ValueError( \"Right parenthesis occurs in regex withour matching", "other) other_states, other_tf = maybe_add_state(other, self) state_pairs = product(self_states, other_states)", "The exception message will specify which of these six conditions", "either, though it can be done; give it a go.)", "x in self.transition_function.values() if not isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range,", "= Mapping[Tuple[State, State], Regex] MutableGnfaTF = MutableMapping[Tuple[State, State], Regex] class", "bad_set=bad_range, message_singular=(\"Value {} in the range of the transition \"", "set.\"), message_plural=(\"Values {} in the range of the transition \"", "of A. \"\"\" star_start = _get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start,", "state2)] r4 = self.transition_function[(state1, state2)] new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)),", "List, Mapping, MutableMapping, Optional, Set, Tuple, Union, cast ) from", "NFA) -> NFA: copy_tf = {} for state, symbol in", "\"Right parenthesis occurs in regex withour matching \" \"left parenthesis.\"", "the problem. 
\"\"\" def __or__(self, other: \"DFA\") -> \"DFA\": \"\"\"", "AbstractSet[State]] - `start_state`: State - `accept_states`: AbstractSet[State] (Where States are", "start state is not a member of the set of", "self.transition_function.values() if not isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value {}", "- {'(', ')', '|', '*'} ) ) -> \"NFA\": \"\"\"", "new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 ) reduced_tf[(state1, state2)]", "are not in the fsa's state set.\") ) def _get_successors(", "of dfa1 | dfa2 are ordered pairs of states from", "from the transition function; * the range of the transition", "NFA( transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA] = [] operator_stack", "and the tilde, for reasons that I will explain presently.", "(state, '') in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')] =", "| {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€' for state in self.accept_states:", "of these six conditions things triggered the exception, and which", "self.alphabet | other.alphabet def maybe_add_state( dfa1: DFA, dfa2: DFA )", "symbol)] + other_tf[(state2, symbol)] ) union_start_state = self.start_state + other.start_state", "a valid value; in fact, you are required to specify", "\"Ø\": return regex2 if regex2 == \"Ø\": return regex1 return", "language as the NFA instance. WARNING: The set of DFA", "operation is not commutative. \"\"\" new_self, new_other, concat_tf = self._combine(other)", "the tilde, for reasons that I will explain presently. As", "EMPTIES def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex: first_char =", "'')', '|', '*', '•', '€' and 'Ø'. The parentheses, vertical", "then converts the result back to a DFA. 
That makes", "'')] = {new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def", "star mean what you'd expect them to mean if you", "the fsa's \" \"state set.\") ) def _good_range(self): raise NotImplementedError", "be a member of the alphabet (and hence the checks", "the language recognised by nfa1, and B be the language", "-> NFA: \"\"\" Convenience method that takes a DFA instance", "Container) -> State: counter = 1 new_state = 'new_state1' while", "the problem. \"\"\" def __or__(self, other: \"NFA\") -> \"NFA\": \"\"\"", ") } return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self,", "a ValueError exception on instantiation if any of th following", "concat_tf = self._combine(other) for state in new_self.accept_states: if (state, '')", "an itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes # (minor modification to", "for empty-set.) In the absence of parentheses, the order of", "other.alphabet def maybe_add_state( dfa1: DFA, dfa2: DFA ) -> Tuple[FrozenSet[State],", "for item in ( set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states)) )", "def _good_accept(self) -> None: bad_accept_states = self.accept_states - self.states _error_message(", "dfa2` returns a dfa that recognizes A union B. The", "NFAs. \"\"\" # powerset code an itertools recipe, from #", "as input is very simple -- much simpler than the", "'|'} else '' ) if char not in alphabet |", "set; 5. 
the range of the transition function is not", "for pair in product({'q1'}, alphabet) } accept_states = set() if", "to be much more complicated than necessary; maybe I'll figure", "but your letter is so very close to the empty-set", "| other.alphabet def maybe_add_state( dfa1: DFA, dfa2: DFA ) ->", "= maybe_add_state(self, other) other_states, other_tf = maybe_add_state(other, self) state_pairs =", "determinized_tf = {} determinized_accept = set() for (state_set, symbol) in", "in self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)] if (state1, state2) in", "regex_union(regex1: Regex, regex2: Regex) -> Regex: if regex1 == \"Ø\":", "AbstractSet[State], symbol: Symbol ) -> FrozenSet[State]: def get_successor(state: State, sym:", "paren_count -= 1 if paren_count < 0: raise ValueError( \"Right", "itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes # (minor modification to make", "def star(self) -> \"NFA\": \"\"\" Let A be the language", "though it can be done; give it a go.) 'Ø'", "= f'({regex1})' if union_main_scope(regex2): regex2 = f'({regex2})' return regex1 +", "explain presently. As of now, the syntax of the regular", "_extract_states_alphabet, _error_message, _good_alphabet, _check_input ) State = str Symbol =", "in {'(', '|'} else '' ) if char not in", "epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set) def", "having different alphabets. \"\"\" new_self, new_other, union_tf = self._combine(other) union_start_state", "the nfa's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for", "will explain presently. 
As of now, the syntax of the", ") machine_stack: List[NFA] = [] operator_stack = ['sentinel'] def binary_operate()", "self.transition_function.items() } return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states:", "and processed[-1] in {'|', '•'}: raise ValueError( \"Regex contains binary", "nfa that recognizes A union B. The cardinality of the", "NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2: NFA)", "a state and a symbol is in the domain of", "regex1 return f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2 = self.transition_function[(rip, rip)]", "exception on instantiation if any of th following are true:", "not a one-character string; * the transition function is missing", "{self.start_state} star_accepts = self.accept_states | {star_start} return NFA( transition_function=star_tf, start_state=star_start,", "to the empty-set symbol. If, by some miracle, there is", "state1, state2 in product( self.states | {gnfa_start}, self.states | {gnfa_accept}", "1 elif char == ')': paren_count -= 1 elif char", "_good_range(self) -> None: transition_range = set(self.transition_function.values()) bad_range = transition_range -", "a member of the alphabet inferred from the transition function", "\"operator; not cool.\" ) if char == '(': paren_count +=", "recognised by dfa1, B be the language recognised by dfa2.", "None: right_operand = machine_stack.pop() left_operand = machine_stack.pop() machine = operator_to_operation[operator_stack.pop()](", "it gets used internally. '€' (option-shift-2) is used to match", ") if char == '(': paren_count += 1 if char", "is in the domain of the transition function. 
The exception", "processed = '' paren_count = 0 for char in regex:", "of the nfa's state set --- i.e., the values of", ") if set(state_set) & self.accept_states: determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state})", "function is missing a case -- i.e., it is not", "+ other.start_state union_accept_states = { _stringify(item) for item in (", "NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]]", "= {new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self,", "set.\") ) def _get_successors( self, *, state_set: AbstractSet[State], symbol: Symbol", "where a is an element of A and b is", "-- much simpler than the standard python regular expresssions. All", "OPERATORS: if operator_stack[-1] in PARENTHE or compare(char) > 0: operator_stack.append(char)", "that I will explain presently. As of now, the syntax", "sets (or frozensets). The empty set is a valid value;", "a regex string that generates A. That regex string is", "transition function; * the range of the transition function is", "expresssions. All characters are intepreted as literals for symbols in", "transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA] = [] operator_stack =", "= product(self_states, other_states) union_transition_function = {} for (state1, state2), symbol", "don't want to. But it gets used internally. '€' (option-shift-2)", "state2)] new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 ) reduced_tf[(state1,", "= '€' for state1, state2 in product( self.states | {gnfa_start},", "symbol in self.alphabet: star_tf[(star_start, symbol)] = set() for state in", "instance. 
\"\"\" nd_transition_function = { key: {value} for key, value", "new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf = add_empty_transitions(self,", "= _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')] = { new_self.start_state, new_other.start_state", "return current_state in self.accept_states def encode(self) -> Regex: \"\"\" Let", "return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"NFA\")", "for state in self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€' for state1,", "The class will raise a ValueError exception on instantiation if", ") overlap = self.states & other.states while overlap: other =", "This method uses a version of Dijkstra's shunting yard algorithm", "input; returns an NFA that recognises the language defined by", "states. For related reasons, the time complexity of this method", "{'(', ')', '|', '*'} ) ) -> \"NFA\": \"\"\" Takes", "function; * a member of the alphabet inferred from the", "following are true: 1. the start state is not a", "'•', '*'] PARENTHE = ['(', ')'] EMPTIES = ['€', 'Ø']", "self._get_successors( state_set=state_set, symbol='' ) while epsilon_neighbours - state_set: state_set =", "\"\"\" File containing DFA and NFA public classes \"\"\" import", "{ new_self.start_state, new_other.start_state } for symbol in new_self.alphabet | new_other.alphabet:", "in the number of states of the NFA. Don't determinize", "function. The exception message will specify which of these above", "union_tf = self._combine(other) union_start_state = _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')]", "NFAs having different alphabets. 
\"\"\" new_self, new_other, union_tf = self._combine(other)", "{ pair: set() for pair in product({'q1', 'q2'}, alphabet) }", "\"DFA\") -> \"DFA\": \"\"\" Let A be the language recognised", "-> None: bad_range = { x for x in self.transition_function.values()", "recognised by nfa1, and B be the language recognized by", "def _get_successors( self, *, state_set: AbstractSet[State], symbol: Symbol ) ->", "MutableGnfaTF = {} for state1, symbol in self.transition_function.keys(): state2 =", "*, transition_function: FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function,", "= transition_function self.body_states = body_states self.start_state = start_state self.accept_state =", "white space. Actually, that's not quite right -- the default", "of the state set of nfa1 plus the cardinality of", "language recognized by dfa2. `dfa1 | dfa2` returns a dfa", "printable from typing import ( AbstractSet, Container, FrozenSet, Iterable, List,", "nfa1.alphabet if extra_symbols: for pair in product(nfa1.states, extra_symbols): new_tf[pair] =", "3. a member of the alphabet inferred from the transition", "the following are true: 1. the start state is not", "State = str Symbol = str Regex = str FsaTransitionFunction", "determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set, symbol) ) if set(state_set) &", "who cares about this, I will change the symbol for", "set() for pair in product({'q1'}, alphabet) } accept_states = set()", "problem with the input DFAs having different alphabets. 
\"\"\" union_alphabet", "& self.accept_states == set() def determinize(self) -> \"DFA\": \"\"\"Returns a", "copy(nfa: NFA) -> NFA: copy_tf = {} for state, symbol", ") processed += char if paren_count > 0: raise ValueError(", "accept_states self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property def", "set.\") ) def _good_range(self): raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State],", "empty.union( *[frozenset(get_successor(state, symbol)) for state in state_set] ) def _add_epsilons(self,", "def __init__( self, *, transition_function: FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State]", "in string: current_state = self.transition_function[(current_state, symbol)] return current_state in self.accept_states", "_GNFA: def __init__( self, transition_function: GnfaTransitionFunction, body_states: Set[State], start_state: State,", "transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range - self.states,", "+= '|' + symbol else: gnfa_tf[(state1, state2)] = symbol gnfa_start", "\"function are not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values()", "determinized_accept = set() for (state_set, symbol) in product(state_sets, self._alphabet): determinzed_state", "an nfa that recognizes A union B. The cardinality of", "= _get_new_state(self.states) gnfa_accept = _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] =", "parentheses, the order of operations is: `*`, then `•`, then", "not in \" \"alphabet and not an accepted regex character.\"", "self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State,", "in the domain of the transition function. 
The exception message", "Takes a regular expression and an alphabet (i.e., a set", "def regex_concat(regex1: Regex, regex2: Regex) -> Regex: if regex1 ==", "`transition_function` implicitly define the dfa's state-set and alphabet. The class", "the transition function; 3. a member of the alphabet inferred", "B be the language recognised by dfa2. `dfa1 + dfa2`", "State): return state + '`' def copy(nfa: NFA) -> NFA:", "symbol is in the domain of the transition function. The", "& self.accept_states: determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state}) ) return DFA(", "0: operator_stack.append(char) else: while ( operator_stack[-1] not in PARENTHE and", "message_singular=(\"Accept state {} is not a member of the fsa's", "OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex, alphabet) for char in regex:", "A be the language recognised by dfa1, and B be", "import ( _Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input ) State =", "the set of strings of the form a concat b,", "the transition \" \"function are not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]]", "the transition function is not a subset of the set", "in nfa.transition_function[(state, symbol)] } copy_start = prime(nfa.start_state) copy_accept = {prime(x)", "and Symbols are one-char strings.) The transition function' keys implicitly", "-> AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state,", "by nfa. 
`nfa.self()` returns an nfa that recognizes A* --", "a GNFA equivalent to `self` with one less state in", "recommend you don't `+` dfas with large numbers of states.", "transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other = NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states", "member of the fsa's \" \"state set.\"), message_plural=(\"Accept states {}", "followed by an operator, or 4. the input regex does", "= nfa2.alphabet - nfa1.alphabet if extra_symbols: for pair in product(nfa1.states,", "frozenset() # This avoids a mypy bug. return empty.union( *[frozenset(get_successor(state,", "is no problem with the input NFAs having different alphabets.", "than necessary; maybe I'll figure out how to improve on", "B -- i.e., the language consisting of the set of", "+ state2, symbol)] = ( self_tf[(state1, symbol)] + other_tf[(state2, symbol)]", "= self._transition(current_states, symbol) return not current_states & self.accept_states == set()", "a mac keyboard) means concatenation. You can leave concatentation implicit,", "set(product(self_states, other.accept_states)) ) } return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states )", "following are true: * the start state is not a", "return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states: Iterable[State]) ->", "bad_set=transition_range - self.states, message_singular=(\"State {} in the range of the", "expect). 
The class will raise a ValueError exception on instantiation", "other.states while overlap: other = copy(other) overlap = self.states &", "{new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self, other:", "def __or__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A be", "List[NFA] = [] operator_stack = ['sentinel'] def binary_operate() -> None:", "the regular expressions that this method takes as input is", ") def star(self) -> \"NFA\": \"\"\" Let A be the", "'(': binary_operate() operator_stack.pop() while len(operator_stack) > 1: binary_operate() return machine_stack.pop()", "char == '*': machine_stack[-1] = machine_stack[-1].star() elif char in OPERATORS:", "there's no other way to match, for instance, {'', '0'}", "_error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot contain character {}.\", message_plural=\"Alphabet", "of the set of NFA states. For related reasons, the", "self_tf[(state1, symbol)] + other_tf[(state2, symbol)] ) union_start_state = self.start_state +", "different alphabets. \"\"\" new_self, new_other, union_tf = self._combine(other) union_start_state =", "raise ValueError( f\"Regex contains character '{char}' that is not in", "Union, cast ) from .base import ( _Base, _extract_states_alphabet, _error_message,", "_error_message, _good_alphabet, _check_input ) State = str Symbol = str", "combination_tf def _good_range(self) -> None: bad_range = { x for", "'')] = {self.start_state} star_accepts = self.accept_states | {star_start} return NFA(", "self.transition_function = transition_function self.body_states = body_states self.start_state = start_state self.accept_state", "the set of \"printable\" characters, which includes the standard ASCII", "function; 2. the set of accept states is not a", "4. a member of the transition function's range is not", "on a mac keyboard) means concatenation. 
You can leave concatentation", "char in EMPTIES: machine_stack.append(fit_empty(char)) elif char in alphabet: machine_stack.append(fit_symbol(char)) elif", "by concatenating any number of members of A. \"\"\" star_start", "{} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf def _good_range(self) ->", "from .base import ( _Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input )", "B be the language recognized by nfa2. `nfa1 | nfa2`", "of the state-set of nfa1 | nfa2 is the cardinality", "default value is string.printable *minus* parentheses, the vertical bar, the", "alphabet, message_singular=\"Alphabet cannot contain character {}.\", message_plural=\"Alphabet cannot contain characters", "The exception message will specify which of these above conditions", "FrozenSet[State]: def get_successor(state: State, sym: Symbol) -> AbstractSet[State]: self._transition_function =", "OPERATORS + PARENTHE + EMPTIES def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol])", "def fit_empty(empty: Regex) -> NFA: tf: NfaTransitionFunction = { pair:", "regex2: Regex) -> Regex: if regex1 == \"Ø\": return regex2", "strings) as input; returns an NFA that recognises the language", "char not in alphabet | set(NOT_SYMBOLS): raise ValueError( f\"Regex contains", "regex[0] if first_char in OPERATORS: raise ValueError(f\"Regex cannot start with", "self.start_state for symbol in string: current_state = self.transition_function[(current_state, symbol)] return", "recognised by dfa1, and B be the language recognized by", "= frozenset() # This avoids a mypy bug. return empty.union(", "to improve on average simplicity, eventually. \"\"\" gnfa = self._gnfize()", "gnfa_tf[(state1, state2)] += '|' + symbol else: gnfa_tf[(state1, state2)] =", "\"\"\" gnfa = self._gnfize() while len(gnfa.states) > 2: gnfa =", "regex2 == 'Ø': return 'Ø' if regex1 == '€': return", "bug. 
return empty.union( *[frozenset(get_successor(state, symbol)) for state in state_set] )", "state2)] += '|' + symbol else: gnfa_tf[(state1, state2)] = symbol", "= _stringify( self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept )", "the language recognised by dfa2. `dfa1 + dfa2` returns a", "like an epsilon); there's no other way to match, for", "language with it. For reaons related to the above, the", "start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA] = [] operator_stack = ['sentinel']", "exception message will specify which of these six conditions things", "in the range of the transition \" \"function are not", "- {self.start_state, rip}: r3 = self.transition_function[(rip, state2)] r4 = self.transition_function[(state1,", "in self.transition_function.values() if not isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value", "= self.states & other.states def add_empty_transitions( nfa1: NFA, nfa2: NFA", "or compare(char) > 0: operator_stack.append(char) else: while ( operator_stack[-1] not", "ValueError exception is the string contains symbols that aren't in", "of one-character strings) as input; returns an NFA that recognises", "`transition_function`: Mapping[Tuple[State, Symbol], State] - `start_state`: State - `accept_state`: AbstractSet[State]", "input DFAs having different alphabets. \"\"\" union_alphabet = self.alphabet |", "frozensets). def powerset(iterable: Iterable) -> Set[FrozenSet]: s = list(iterable) return", "( '•' if processed[-1] not in {'(', '|'} else ''", "of frozensets). def powerset(iterable: Iterable) -> Set[FrozenSet]: s = list(iterable)", "by dfa2. `dfa1 + dfa2` returns a DFA that recognises", "\"Ø\": return regex1 return f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2 =", "= { pair: set() for pair in product({'q1'}, alphabet) }", "from the transition function; 6. 
the transition function is missing", "of B. Note that this `+` operation is not commutative.", "\" \"alphabet and not an accepted regex character.\" ) if", "for symbols in the alphabet except for '(', '')', '|',", "-> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function new_states = dfa1.states extra_symbols", "the transition function is the power-set of the nfa's state", "new_tf = dfa1.transition_function new_states = dfa1.states extra_symbols = dfa2.alphabet -", "dfa2. `dfa1 + dfa2` returns a DFA that recognises the", "and not one of the above veboten characters, 3. the", "symbols that aren't in the DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet)", "related reasons, the time complexity of this method is exponential", "`dfa1 | dfa2` returns a dfa that recognizes A union", "inferred from the transition function; * the set of accept", "gnfa = self._gnfize() while len(gnfa.states) > 2: gnfa = gnfa.reduce()", "} _error_message( bad_set=bad_range, message_singular=(\"Value {} in the range of the", "nfa1) self_tf, other_tf = add_empty_transitions(self, other) new_self = NFA( transition_function=self_tf,", "<= 0 ): binary_operate() operator_stack.append(char) elif char == '(': operator_stack.append(char)", "inferred from the transition function; * the range of the", "DFA. That makes for a relatively simple but, sadly, computationally", "of the regular expressions that this method takes as input", "{'', '0'} with the current syntax. (Quick challenge: it's not", "in B. This DFA operator is parasitic on the NFA", "alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for symbol in string: current_states =", "of the NFA. 
(My apologies to speakers of Scandinavian languages", "contains any of the verboten characters -- i.e.,`(` , `)`,", "def _good_range(self) -> None: bad_range = { x for x", "symbol)] = set() for state in self.accept_states: star_tf[(state, '')] =", "machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine) def compare(operator: Regex)", "* a member of the alphabet inferred from the transition", "( AbstractSet, Container, FrozenSet, Iterable, List, Mapping, MutableMapping, Optional, Set,", "regex: if char in EMPTIES: machine_stack.append(fit_empty(char)) elif char in alphabet:", "self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {} is not a member", "= symbol gnfa_start = _get_new_state(self.states) gnfa_accept = _get_new_state(self.states | {gnfa_start})", "self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€' for state1, state2 in product(", "+= ( '•' if processed[-1] not in {'(', '|'} else", "transition \" \"function is not in the fsa's state set.\"),", "elements of the tuples represent the nfa's states, and the", "state set.\") ) def _get_successors( self, *, state_set: AbstractSet[State], symbol:", "Regex) -> Regex: if regex in EMPTIES: return '€' if", "= self._combine(other) for state in new_self.accept_states: if (state, '') in", "state-set of nfa1 | nfa2 is the cardinality of the", "the language defined by that regular expression and that alphabet.", "is the empty set, if it is. You can define", "accepts(self, string: str) -> bool: \"\"\" Determines whether nfa accepts", "= {} determinized_accept = set() for (state_set, symbol) in product(state_sets,", "new_self.accept_states: if (state, '') in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state,", "dfas with large numbers of states. 
\"\"\" return (self.non_determinize() +", "the set of accept states is not a subset of", "concatentation implicit, as is usual; no need to write '•''", "average simplicity, eventually. \"\"\" gnfa = self._gnfize() while len(gnfa.states) >", "if union_main_scope(regex1): regex1 = f'({regex1})' if union_main_scope(regex2): regex2 = f'({regex2})'", "Actually, that's not quite right -- the default value is", "means concatenation. You can leave concatentation implicit, as is usual;", "regex does not have properly matching parentheses. \"\"\" operator_to_operation =", "given state-symbol pair is the empty set, if it is.", "it. \"\"\" def union_main_scope(regex: Regex) -> bool: paren_count = 0", "self.accept_states: star_tf[(state, '')] = {self.start_state} star_accepts = self.accept_states | {star_start}", "A. That regex string is liable to be much more", "and compare(char) <= 0 ): binary_operate() operator_stack.append(char) elif char ==", "return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) -> NFA: \"\"\" Convenience method", "cannot contain character {}.\", message_plural=\"Alphabet cannot contain characters {}.\" )", "States are strings and Symbols are one-char strings). The keys", "operator_stack.append(char) else: while ( operator_stack[-1] not in PARENTHE and compare(char)", "case that every pair of a state and a symbol", "to. But it gets used internally. 
'€' (option-shift-2) is used", "EMPTIES: return '€' if len(regex) == 1: return regex +", "return f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2 = self.transition_function[(rip, rip)] reduced_tf", "\"\"\" Let A be the language recognised by dfa1, B", "for x in nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept )", "elif char == '(': operator_stack.append(char) else: while operator_stack[-1] != '(':", "new_state in state_set: counter += 1 new_state = \"new_state\" +", "regex: if char in alphabet or char == '(': if", "prime(state: State): return state + '`' def copy(nfa: NFA) ->", "it's default value is string.printable -- i.e., the set of", "internally. '€' (option-shift-2) is used to match the empty string", "way to match, for instance, {'', '0'} with the current", "dfa1.states | {error_state} for symbol in union_alphabet: new_tf[(error_state, symbol)] =", "contain character {}.\", message_plural=\"Alphabet cannot contain characters {}.\" ) def", "transition function' keys implicitly define the nfa's state-set and alphabet;", "of A and b is an element of B. Note", "exception message will specify which of these above conditions things", "-= 1 elif char == '|': if paren_count == 0:", "a member of the fsa's \" \"state set.\"), message_plural=(\"Accept states", "'€' for state in self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€' for", "'|', '*'} ) ) -> \"NFA\": \"\"\" Takes a regular", "if you are familiar with regular expressions. 
'•' (option-8 on", "import ( AbstractSet, Container, FrozenSet, Iterable, List, Mapping, MutableMapping, Optional,", "item in chain.from_iterable( combinations(s, r) for r in range(len(s)+1) )", "match the empty string (because it kind of looks like", "_gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF = {} for state1, symbol", "PARENTHE and compare(char) <= 0 ): binary_operate() operator_stack.append(char) elif char", "not an accepted regex character.\" ) if char in OPERATORS", "bool: \"\"\" Determines whether nfa accepts input string. Will raise", "six conditions things triggered the exception, and which states/symbols are", "elements are the symbols in the alphabet. The domain of", "same language as the NFA instance. WARNING: The set of", "any of the verboten characters -- i.e.,`(` , `)`, `|`,", "nfa2. `nfa1 + nfa2` returns an nfa that recognizes A", "\"\"\" union_alphabet = self.alphabet | other.alphabet def maybe_add_state( dfa1: DFA,", "str: if not isinstance(states, collections.abc.Sequence): states = list(states) states.sort() return", "_FSA(_Base): def __init__( self, *, transition_function: FsaTransitionFunction, start_state: State, accept_states:", "three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], State] - `start_state`:", "gnfa_accept) def _good_range(self) -> None: transition_range = set(self.transition_function.values()) bad_range =", "Mapping[Tuple[State, Symbol], State] - `start_state`: State - `accept_state`: AbstractSet[State] (where", "= {prime(x) for x in nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start,", "current_states = self._transition(current_states, symbol) return not current_states & self.accept_states ==", "'|': if paren_count == 0: return True return False def", "self.states & other.states def add_empty_transitions( nfa1: NFA, nfa2: NFA )", "raise a ValueError exception is the string contains symbols that", "strings formed by concatenating any number of members of A.", ") 
@staticmethod def fit( regex: Regex, alphabet: AbstractSet[Symbol] = (", "char in regex: if char in alphabet or char ==", "don't `+` dfas with large numbers of states. \"\"\" return", "parenthesis occurs in regex without matching right \" \"parenthesis.\" )", "-> \"NFA\": \"\"\" Takes a regular expression and an alphabet", "set of states inferred from the transition function; 3. a", "self.transition_function[(current_state, symbol)] return current_state in self.accept_states def encode(self) -> Regex:", "of the transition \" \"function are not sets.\") ) transition_range:", "set() for (state_set, symbol) in product(state_sets, self._alphabet): determinzed_state = _stringify(state_set)", "-= 1 if paren_count < 0: raise ValueError( \"Right parenthesis", "-> \"DFA\": \"\"\"Returns a DFA that recognizes the same same", "PARENTHE or compare(char) > 0: operator_stack.append(char) else: while ( operator_stack[-1]", "not commutative. \"\"\" new_self, new_other, concat_tf = self._combine(other) for state", "'Ø': return 'Ø' if regex1 == '€': return regex2 if", "= state_set | epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol='' )", "returns a regex string that generates A. That regex string", "the source of the problem. \"\"\" def __or__(self, other: \"NFA\")", "(where States are strings and Symbols are one-char strings). The", "- `start_state`: State - `accept_states`: AbstractSet[State] (Where States are strings,", "be done; give it a go.) 'Ø' (option-shift-o) represents the", "+ '`' def copy(nfa: NFA) -> NFA: copy_tf = {}", "of accept states is not a subset of the set", "for symbol in self.alphabet: star_tf[(star_start, symbol)] = set() for state", "determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set, symbol) )", "state is not a member of the set of states", "no problem with the input NFAs having different alphabets. 
\"\"\"", "not isinstance(states, collections.abc.Sequence): states = list(states) states.sort() return \"\".join(states) def", "inferred from the transition function is not a one-character string;", "symbol)] = _stringify( self._transition(state_set, symbol) ) if set(state_set) & self.accept_states:", "Regex: if regex1 == 'Ø' or regex2 == 'Ø': return", "{} in the range of the transition \" \"function are", "self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State] = frozenset() # This avoids", "bad_set=bad_range, message_singular=(\"State {} in the range of the transition \"", "_combine(self, other: \"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state: State):", "for (state1, state2), symbol in product(state_pairs, union_alphabet): union_transition_function[(state1 + state2,", "} return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other:", "is so very close to the empty-set symbol. If, by", "itertools import product, chain, combinations from string import printable from", "that aren't in the nfa's alphabet. 
\"\"\" _check_input(string=string, alphabet=self.alphabet) current_states", "alphabet) } tf[('q1', symbol)] = {'q2'} return NFA( transition_function=tf, start_state='q1',", "== 0: return True return False def regex_star(regex: Regex) ->", "State - `accept_states`: AbstractSet[State] (Where States are strings, and Symbols", "= error_state for symbol in extra_symbols: for state in dfa1.states:", "not be inferred to be a member of the alphabet", "right_operand ) machine_stack.append(machine) def compare(operator: Regex) -> int: return (", ") self._accept_states = accept_states self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys() )", "regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 ) reduced_tf[(state1, state2)] = new_regex return", "set of states inferred from the transition function; * a", "a ValueError exception is the string contains symbols that aren't", "empty == 'Ø' else {'q1'} return NFA( transition_function=tf, start_state='q1', accept_states=accept_states", "state2 in self.states - {self.start_state, rip}: r3 = self.transition_function[(rip, state2)]", "def __init__( self, transition_function: GnfaTransitionFunction, body_states: Set[State], start_state: State, accept_state:", "in regex withour matching \" \"left parenthesis.\" ) processed +=", "form a concat b, where a is an element of", "keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`: State", "new_other, concat_tf = self._combine(other) for state in new_self.accept_states: if (state,", "AbstractSet[Symbol]) -> Regex: first_char = regex[0] if first_char in OPERATORS:", "result back to a DFA. That makes for a relatively", "back to a DFA. 
That makes for a relatively simple", "| set(NOT_SYMBOLS): raise ValueError( f\"Regex contains character '{char}' that is", "add_empty_transitions( nfa1: NFA, nfa2: NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def", "gnfa.accept_state)] def non_determinize(self) -> NFA: \"\"\" Convenience method that takes", ") from .base import ( _Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input", "new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)] = set() union_accept_states = new_self.accept_states", "`nfa.self()` returns an nfa that recognizes A* -- i.e., the", "= f'({regex2})' return regex1 + regex2 def regex_union(regex1: Regex, regex2:", "new_tf = nfa1.transition_function extra_symbols = nfa2.alphabet - nfa1.alphabet if extra_symbols:", "= self.transition_function[(state1, symbol)] if (state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)]", "empty string will not be inferred to be a member", "def fit( regex: Regex, alphabet: AbstractSet[Symbol] = ( set(printable) -", "Scandinavian languages for the last one; I am very against", "'|', '•', '*'] PARENTHE = ['(', ')'] EMPTIES = ['€',", "the NFA instance. WARNING: The set of DFA states has", "so very close to the empty-set symbol. 
If, by some", "processed += ( '•' if processed[-1] not in {'(', '|'}", "are true: * the start state is not a member", "new_tf[(error_state, symbol)] = error_state for symbol in extra_symbols: for state", "raise ValueError( \"Right parenthesis occurs in regex withour matching \"", "the nfa's state set --- i.e., the values of the", "state_set: counter += 1 new_state = \"new_state\" + str(counter) return", "\"\"\" new_self, new_other, concat_tf = self._combine(other) for state in new_self.accept_states:", "= list(states) states.sort() return \"\".join(states) def _get_new_state(state_set: Container) -> State:", "power-set of the nfa's state set --- i.e., the values", "= self.accept_states - self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {} is", "else: while operator_stack[-1] != '(': binary_operate() operator_stack.pop() while len(operator_stack) >", "return self._accept_states def _well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept()", "Iterable) -> Set[FrozenSet]: s = list(iterable) return { frozenset(item) for", "0: raise ValueError( \"Left parenthesis occurs in regex without matching", "it a go.) 'Ø' (option-shift-o) represents the empty set; you", "from typing import ( AbstractSet, Container, FrozenSet, Iterable, List, Mapping,", "the transition function dictionary should be sets (or frozensets). The", "regex2: Regex) -> Regex: if regex1 == 'Ø' or regex2", "empty string in normal python regex syntax either, though it", "is exponential in the number of states of the NFA.", "the set of states inferred from the transition function; *", "for symbol in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)] = set()", "power-set of the set of NFA states. For related reasons,", "contains symbols that aren't in the DFA's alphabet. 
\"\"\" _check_input(string=string,", ") State = str Symbol = str Regex = str", "( self_tf[(state1, symbol)] + other_tf[(state2, symbol)] ) union_start_state = self.start_state", "A* -- i.e., the set of all strings formed by", "-- i.e., the set of \"printable\" characters, which includes the", "DFA that recognizes the same same language as the NFA", "the start state is not a member of the set", "operator followed by an \" \"operator; not cool.\" ) if", "self.states _error_message( bad_set=bad_range, message_singular=(\"State {} in the range of the", "function. Note that the empty string will not be inferred", "Mapping[Tuple[State, State], Regex] MutableGnfaTF = MutableMapping[Tuple[State, State], Regex] class _GNFA:", "elif char == ')': paren_count -= 1 elif char ==", "valid value; in fact, you are required to specify that", "self._accept_states def _well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet)", "in self.states - {self.start_state, rip}: r3 = self.transition_function[(rip, state2)] r4", "{ prime(x) for x in nfa.transition_function[(state, symbol)] } copy_start =", "without matching right \" \"parenthesis.\" ) return processed DfaTransitionFunction =", "can leave concatentation implicit, as is usual; no need to", "first_char in OPERATORS: raise ValueError(f\"Regex cannot start with '{first_char}'.\") processed", "used to match the empty string (because it kind of", "the symbols in the alphabet. The domain of the transition", "gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) -> NFA: \"\"\" Convenience method that", "regex_concat(regex1: Regex, regex2: Regex) -> Regex: if regex1 == 'Ø'", "char in regex: if char == '(': paren_count += 1", "Symbol], AbstractSet[State]] - `start_state`: State - `accept_states`: AbstractSet[State] (Where States", "symbols in the alphabet of the NFA. 
(My apologies to", "some miracle, there is someone who cares about this, I", "the transition function; * the range of the transition function", "other: \"DFA\") -> \"DFA\": \"\"\" Let A be the language", "using the empty string in place of an alphabet symbol", ") def __add__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A", "is an element of B. Note that this `+` operation", "set.\") ) def accepts(self, string: str) -> bool: \"\"\" `my_dfa.accepts(\"some", "int: return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex,", "`*`, `•`, `€` and `Ø`, 2. the input regex string", "paren_count > 0: raise ValueError( \"Left parenthesis occurs in regex", "instantiation if any of the following are true: 1. the", "of looks like an epsilon); there's no other way to", "with '{first_char}'.\") processed = '' paren_count = 0 for char", "`accept_states`: AbstractSet[State] (Where States are strings, and Symbols are one-char", "for instance, {'', '0'} with the current syntax. (Quick challenge:", "recognizes A concat B -- i.e., the language consisting of", "set of frozensets). def powerset(iterable: Iterable) -> Set[FrozenSet]: s =", "Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]] ] class _FSA(_Base): def __init__(", "state_set: AbstractSet[State], symbol: Symbol ) -> FrozenSet[State]: def get_successor(state: State,", "{} is not a member of the fsa's \" \"state", "to parse the regex and build the NFA. The method", "a set of one-character strings) as input; returns an NFA", "concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')] = {new_other.start_state} return NFA(", "A union B. The states of dfa1 | dfa2 are", "B be the language recognized by dfa2. 
`dfa1 | dfa2`", "regex in EMPTIES: return '€' if len(regex) == 1: return", "optional; it's default value is string.printable -- i.e., the set", "gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|' + symbol else: gnfa_tf[(state1, state2)]", "in regex without matching right \" \"parenthesis.\" ) return processed", "That regex string is liable to be much more complicated", "member of the transition function's range is not a set;", "NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot contain character", "fsa's state set.\") ) def _get_successors( self, *, state_set: AbstractSet[State],", "formed by concatenating any number of members of A. \"\"\"", ") def fit_empty(empty: Regex) -> NFA: tf: NfaTransitionFunction = {", "def prime(state: State): return state + '`' def copy(nfa: NFA)", "\"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state: State): return state", "-> Regex: first_char = regex[0] if first_char in OPERATORS: raise", "in union_alphabet: new_tf[(error_state, symbol)] = error_state for symbol in extra_symbols:", "empty-set.) In the absence of parentheses, the order of operations", "- OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex, alphabet) for char in", "> 0: operator_stack.append(char) else: while ( operator_stack[-1] not in PARENTHE", "Regex, alphabet: AbstractSet[Symbol]) -> Regex: first_char = regex[0] if first_char", "= accept_states self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property", "class _FSA(_Base): def __init__( self, *, transition_function: FsaTransitionFunction, start_state: State,", "= [] operator_stack = ['sentinel'] def binary_operate() -> None: right_operand", "accepts \"some string\", and `False` otherwise. 
Will raise a ValueError", ") return frozenset(state_set) def _transition(self, state_set: AbstractSet[State], symbol: Symbol): return", "{} in the range of the transition \" \"function is", "punctuation and white space. Actually, that's not quite right --", "the absence of parentheses, the order of operations is: `*`,", "char if paren_count > 0: raise ValueError( \"Left parenthesis occurs", "'')] = {self.start_state} for symbol in self.alphabet: star_tf[(star_start, symbol)] =", "operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine) def compare(operator: Regex) -> int:", "time complexity of this method is exponential in the number", "class. Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]]", "you are required to specify that the successor set for", "mypy bug. return empty.union( *[frozenset(get_successor(state, symbol)) for state in state_set]", "fact, you are required to specify that the successor set", "machine_stack.pop() left_operand = machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand )", "in it. \"\"\" def union_main_scope(regex: Regex) -> bool: paren_count =", "is not a set.\"), message_plural=(\"Values {} in the range of", "extra_symbols: for state in dfa1.states: new_tf[(state, symbol)] = error_state return", "function' keys implicitly define the nfa's state-set and alphabet; the", "`nfa1 | nfa2` returns an nfa that recognizes A union", "is the cardinality of the state set of nfa1 plus", "complicated than necessary; maybe I'll figure out how to improve", "states is not a subset of the set of states", "binary_operate() -> None: right_operand = machine_stack.pop() left_operand = machine_stack.pop() machine", "set() return new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf", "if you don't want to. But it gets used internally.", "input string. 
Will raise a ValueError exception is the string", "return new_states, new_tf self_states, self_tf = maybe_add_state(self, other) other_states, other_tf", "NFA. The method will raise a ValueError exception if any", "character {}.\", message_plural=\"Alphabet cannot contain characters {}.\" ) def fit_empty(empty:", "char in regex: if char in EMPTIES: machine_stack.append(fit_empty(char)) elif char", "'(': operator_stack.append(char) else: while operator_stack[-1] != '(': binary_operate() operator_stack.pop() while", "collections.abc.Sequence): states = list(states) states.sort() return \"\".join(states) def _get_new_state(state_set: Container)", "or regex2 == 'Ø': return 'Ø' if regex1 == '€':", "range of the transition \" \"function are not in the", "\"NFA\": \"\"\" Takes a regular expression and an alphabet (i.e.,", "-- the default value is string.printable *minus* parentheses, the vertical", "recognised by nfa. `nfa.self()` returns an nfa that recognizes A*", "are the symbols in the alphabet. The domain of the", "represent the nfa's states, and the second elements are the", "nfa2 plus 1. There is no problem with the input", "Container, FrozenSet, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple, Union,", "product(nfa1.states, extra_symbols): new_tf[pair] = set() return new_tf return add_one_way(nfa1, nfa2),", "uses a version of Dijkstra's shunting yard algorithm to parse", "done; give it a go.) 
'Ø' (option-shift-o) represents the empty", "intepreted as literals for symbols in the alphabet except for", "_transition(self, state_set: AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self,", "the empty string in normal python regex syntax either, though", "return processed DfaTransitionFunction = Mapping[Tuple[State, Symbol], State] class DFA(_FSA): \"\"\"", "__init__( self, transition_function: GnfaTransitionFunction, body_states: Set[State], start_state: State, accept_state: State", "challenge: it's not totally obvious how to match the empty", "states {} are not members of the fsa's \" \"state", "the NFA. Don't determinize big NFAs. \"\"\" # powerset code", "self._accept_states = accept_states self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys() ) self._well_defined()", "case -- i.e., it is not the case that every", "if (state, '') in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')]", "== '(': if len(processed) > 0: processed += ( '•'", "string contains a character not in the alphabet, and not", "transition function. Note that the empty string will not be", "to mean if you are familiar with regular expressions. '•'", "= str Regex = str FsaTransitionFunction = Mapping[ Tuple[State, Symbol],", "char == '(': if len(processed) > 0: processed += (", "binary_operate() operator_stack.append(char) elif char == '(': operator_stack.append(char) else: while operator_stack[-1]", "cool.\" ) if char == '(': paren_count += 1 if", "of the following conditions hold: 1. the alphabet contains any", "on instantiation if any of the following are true: 1.", "while len(operator_stack) > 1: binary_operate() return machine_stack.pop() OPERATORS = ['sentinel',", "while ( operator_stack[-1] not in PARENTHE and compare(char) <= 0", "NFA instance. 
\"\"\" nd_transition_function = { key: {value} for key,", "while epsilon_neighbours - state_set: state_set = state_set | epsilon_neighbours epsilon_neighbours", "-> FrozenSet[Symbol]: return self._alphabet @property def accept_states(self) -> AbstractSet[State]: return", "of the transition function is not a subset of the", "instantiation if any of th following are true: * the", "star_tf[(star_start, '')] = {self.start_state} for symbol in self.alphabet: star_tf[(star_start, symbol)]", "encode(self) -> Regex: \"\"\" Let A be the language accepted", "gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept)", "inferred from the transition function; 3. a member of the", "string; * the transition function is missing a case --", "contains a character not in the alphabet, and not one", "self.start_state)] = '€' for state in self.accept_states: gnfa_tf[(state, gnfa_accept)] =", "a set.\"), message_plural=(\"Values {} in the range of the transition", "last one; I am very against English chauvinism, but your", "for state in new_self.accept_states: if (state, '') in concat_tf: concat_tf[(state,", "PARENTHE + EMPTIES def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex:", "def _gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF = {} for state1,", "`False` otherwise. Will raise a ValueError exception is the string", "pairs of states from dfa1 and dfa2. There is no", "input regex string contains a character not in the alphabet,", "mean if you are familiar with regular expressions. '•' (option-8", "'€' and 'Ø'. The parentheses, vertical bar and star mean", "go.) 
'Ø' (option-shift-o) represents the empty set; you can match", "nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf = add_empty_transitions(self, other) new_self =", "DFA that recognises the set of all concatenations of strings", "MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA): \"\"\" A nondeterministic finite automaton", "bar, the star symbol, and the tilde, for reasons that", "set() def determinize(self) -> \"DFA\": \"\"\"Returns a DFA that recognizes", "occurs in regex without matching right \" \"parenthesis.\" ) return", "self, *, state_set: AbstractSet[State], symbol: Symbol ) -> FrozenSet[State]: def", "= self.transition_function[(state1, rip)] for state2 in self.states - {self.start_state, rip}:", "NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES def _pre_process(regex: Regex,", "if char == ')': paren_count -= 1 if paren_count <", "`start_state`: State - `accept_states`: AbstractSet[State] (Where States are strings, and", "= set() return new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf,", "does not have properly matching parentheses. \"\"\" operator_to_operation = {", "the input DFAs into NFAs, uses the NFA '+', then", "return NFA( transition_function=tf, start_state='q1', accept_states=accept_states ) def fit_symbol(symbol: Symbol) ->", "nfa1.transition_function extra_symbols = nfa2.alphabet - nfa1.alphabet if extra_symbols: for pair", "Let A be the language recognised by nfa. `nfa.self()` returns", "3. 
the input regex contain a binary operator followed by", "regex2 == \"Ø\": return regex1 return f\"{regex1}|{regex2}\" rip = self.body_states.pop()", ") -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2: NFA) ->", "def reduce(self) -> \"_GNFA\": \"\"\" Output a GNFA equivalent to", "other_states, other_tf = maybe_add_state(other, self) state_pairs = product(self_states, other_states) union_transition_function", "union_tf[(union_start_state, symbol)] = set() union_accept_states = new_self.accept_states | new_other.accept_states return", "start_state=copy_start, accept_states=copy_accept ) overlap = self.states & other.states while overlap:", "right \" \"parenthesis.\" ) return processed DfaTransitionFunction = Mapping[Tuple[State, Symbol],", "i.e., it is not the case that every pair of", "@property def alphabet(self) -> FrozenSet[Symbol]: return self._alphabet @property def accept_states(self)", "copy_accept = {prime(x) for x in nfa.accept_states} return NFA( transition_function=copy_tf,", "aren't in the nfa's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_states =", "set; you can match to the empty language with it.", "most common punctuation and white space. Actually, that's not quite", "explicitly if you don't want to. But it gets used", "'' ) if char not in alphabet | set(NOT_SYMBOLS): raise", "nfa2: NFA) -> NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols = nfa2.alphabet", "epsilon_neighbours = self._get_successors( state_set=state_set, symbol='' ) while epsilon_neighbours - state_set:", "WARNING: The set of DFA states has the cardinality of", "of these above conditions things triggered the exception, and which", ".base import ( _Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input ) State", "{ frozenset(item) for item in chain.from_iterable( combinations(s, r) for r", "keyboard) means concatenation. 
You can leave concatentation implicit, as is", "`nfa1 + nfa2` returns an nfa that recognizes A concat", "in self.accept_states def encode(self) -> Regex: \"\"\" Let A be", "* the range of the transition function is not a", "standard python regular expresssions. All characters are intepreted as literals", "the form a concat b, where a is an element", "(state_set, symbol) in product(state_sets, self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)]", "bad_range = { x for x in self.transition_function.values() if not", "* the start state is not a member of the", "fit( regex: Regex, alphabet: AbstractSet[Symbol] = ( set(printable) - {'(',", "A be the language recognised by nfa1, and B be", "def get_successor(state: State, sym: Symbol) -> AbstractSet[State]: self._transition_function = cast(", "the verboten characters -- i.e.,`(` , `)`, `|`, `*`, `•`,", "= machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine) def", "accept_states(self) -> AbstractSet[State]: return self._accept_states def _well_defined(self) -> None: super()._well_defined()", "self.start_state + other.start_state union_accept_states = { _stringify(item) for item in", "Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA):", "self.states, message_singular=(\"State {} in the range of the transition \"", "dfa1, and B be the language recognized by dfa2. `dfa1", "def maybe_add_state( dfa1: DFA, dfa2: DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]:", "transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"NFA\") -> \"NFA\":", "Let A be the language accepted by dfa. 
`dfa.encode()` returns", "-> bool: paren_count = 0 for char in regex: if", "-> FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set, symbol='' ) while epsilon_neighbours", "union_accept_states = { _stringify(item) for item in ( set(product(self.accept_states, other_states))", "Regex] class _GNFA: def __init__( self, transition_function: GnfaTransitionFunction, body_states: Set[State],", "B. The cardinality of the state-set of nfa1 | nfa2", "other) new_self = NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other =", "that the successor set for a given state-symbol pair is", "\"\"\" operator_to_operation = { '|': NFA.__or__, '•': NFA.__add__ } _error_message(", "Set, Tuple, Union, cast ) from .base import ( _Base,", "symbol in string: current_state = self.transition_function[(current_state, symbol)] return current_state in", "maybe_add_state(other, self) state_pairs = product(self_states, other_states) union_transition_function = {} for", "on average simplicity, eventually. \"\"\" gnfa = self._gnfize() while len(gnfa.states)", "Will raise a ValueError exception is the string contains symbols", "inferred from the transition function; 6. the transition function is", "of parentheses, the order of operations is: `*`, then `•`,", "if empty == 'Ø' else {'q1'} return NFA( transition_function=tf, start_state='q1',", "if char in EMPTIES: machine_stack.append(fit_empty(char)) elif char in alphabet: machine_stack.append(fit_symbol(char))", "or 4. the input regex does not have properly matching", "an operator, or 4. the input regex does not have", "the language recognised by dfa1, B be the language recognised", "'Ø' cannot be symbols in the alphabet of the NFA.", "State] class DFA(_FSA): \"\"\" A deterministic finite automaton class. Takes", "a binary operator followed by an operator, or 4. 
the", "in OPERATORS: raise ValueError(f\"Regex cannot start with '{first_char}'.\") processed =", "= {'q2'} return NFA( transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA]", "else '' ) if char not in alphabet | set(NOT_SYMBOLS):", "chain.from_iterable( combinations(s, r) for r in range(len(s)+1) ) } state_sets", "DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"DFA\") ->", "- self.states, message_singular=(\"State {} in the range of the transition", "want to. But it gets used internally. '€' (option-shift-2) is", "a DFA that recognises the set of all concatenations of", "of states inferred from the transition function; * the set", "\"Regex contains binary operator followed by an \" \"operator; not", "regex_star(regex: Regex) -> Regex: if regex in EMPTIES: return '€'", "then `•`, then `|`. This method uses a version of", "'(': paren_count += 1 if char == ')': paren_count -=", "state_set] ) def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours =", "related to the above, the characters '(', ')', '|', '*',", "regex character.\" ) if char in OPERATORS and processed[-1] in", "syntax. (Quick challenge: it's not totally obvious how to match", "state in it. \"\"\" def union_main_scope(regex: Regex) -> bool: paren_count", "class _GNFA: def __init__( self, transition_function: GnfaTransitionFunction, body_states: Set[State], start_state:", "the standard ASCII letters and digits, and most common punctuation", "state2 in product( self.states | {gnfa_start}, self.states | {gnfa_accept} ):", "nfa2. 
`nfa1 | nfa2` returns an nfa that recognizes A", "product(state_pairs, union_alphabet): union_transition_function[(state1 + state2, symbol)] = ( self_tf[(state1, symbol)]", "_extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property def alphabet(self) -> FrozenSet[Symbol]: return", ") NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol],", "Symbol], State] class DFA(_FSA): \"\"\" A deterministic finite automaton class.", "right_operand = machine_stack.pop() left_operand = machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand,", "set(state_set) & self.accept_states: determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state}) ) return", "of the `transition_function` implicitly define the dfa's state-set and alphabet.", "literals for symbols in the alphabet except for '(', '')',", "string\", and `False` otherwise. Will raise a ValueError exception is", "the empty string will not be inferred to be a", "\"NFA\": \"\"\" Let A be the language recognised by nfa.", "with one less state in it. \"\"\" def union_main_scope(regex: Regex)", "the string contains symbols that aren't in the DFA's alphabet.", "the range of the transition function is not a subset", "new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other:", "in nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap =", "NFA. (My apologies to speakers of Scandinavian languages for the", "language recognised by nfa. `nfa.self()` returns an nfa that recognizes", "successor set for a given state-symbol pair is the empty", "in PARENTHE or compare(char) > 0: operator_stack.append(char) else: while (", "ordered pairs of states from dfa1 and dfa2. 
There is", "'•', '€' and 'Ø'. The parentheses, vertical bar and star", "returns an nfa that recognizes A* -- i.e., the set", "symbol in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)] = set() union_accept_states", "The alphabet parameter is optional; it's default value is string.printable", "concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')] = {new_other.start_state} return NFA( transition_function=concat_tf,", "NFA that recognises the language defined by that regular expression", "r3 = self.transition_function[(rip, state2)] r4 = self.transition_function[(state1, state2)] new_regex =", "current_states & self.accept_states == set() def determinize(self) -> \"DFA\": \"\"\"Returns", "Regex: if regex in EMPTIES: return '€' if len(regex) ==", "cannot start with '{first_char}'.\") processed = '' paren_count = 0", "This avoids a mypy bug. return empty.union( *[frozenset(get_successor(state, symbol)) for", "transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod def fit( regex: Regex, alphabet:", "whether nfa accepts input string. 
Will raise a ValueError exception", "A be the language recognised by dfa1, B be the", "False def regex_star(regex: Regex) -> Regex: if regex in EMPTIES:", "r3), r4 ) reduced_tf[(state1, state2)] = new_regex return _GNFA( reduced_tf,", "= copy(other) overlap = self.states & other.states def add_empty_transitions( nfa1:", "all strings formed by concatenating any number of members of", "followed by an \" \"operator; not cool.\" ) if char", "= self._combine(other) union_start_state = _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')] =", "the alphabet (and hence the checks below will work as", "\" \"function are not in the fsa's state set.\") )", "regex1 == '€': return regex2 if regex2 == '€': return", "`transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`: State - `accept_states`: AbstractSet[State]", "set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states)) ) } return DFA( transition_function=union_transition_function,", "gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) -> NFA:", "pair is the empty set, if it is. You can", "def __add__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A be", "of the form a concat b, where a is an", "and B be the language recognized by dfa2. `dfa1 |", "set of accept states is not a subset of the", ") -> \"NFA\": \"\"\" Takes a regular expression and an", "the above veboten characters, 3. the input regex contain a", "new_other.states) union_tf[(union_start_state, '')] = { new_self.start_state, new_other.start_state } for symbol", "implicitly define the dfa's state-set and alphabet. The class will", "f\"({regex})*\" def regex_concat(regex1: Regex, regex2: Regex) -> Regex: if regex1", "*[frozenset(get_successor(state, symbol)) for state in state_set] ) def _add_epsilons(self, state_set:", "the empty string (because it kind of looks like an", "it is. 
You can define epsilon-moves by using the empty", "def union_main_scope(regex: Regex) -> bool: paren_count = 0 for char", "As of now, the syntax of the regular expressions that", "that takes a DFA instance and returns an NFA instance.", "self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property def alphabet(self)", "one-char strings.) The transition function' keys implicitly define the nfa's", "union_transition_function = {} for (state1, state2), symbol in product(state_pairs, union_alphabet):", "f'({regex2})' return regex1 + regex2 def regex_union(regex1: Regex, regex2: Regex)", "recognizes the same same language as the NFA instance. WARNING:", "returns a DFA that recognises the set of all concatenations", "union B. The cardinality of the state-set of nfa1 |", "= new_regex return _GNFA( reduced_tf, self.body_states - {rip}, self.start_state, self.accept_state", "parameter is optional; it's default value is string.printable -- i.e.,", "if any of th following are true: * the start", "languages for the last one; I am very against English", "for key, value in self.transition_function.items() } return NFA( transition_function=nd_transition_function, start_state=self.start_state,", "def compare(operator: Regex) -> int: return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1])", "sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range", "function is not a one-character string; 4. 
a member of", "new_other.start_state } for symbol in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)]", "message_plural=(\"Values {} in the range of the transition \" \"function", "much more complicated than necessary; maybe I'll figure out how", "{gnfa_accept} ): if (state1, state2) not in gnfa_tf: gnfa_tf[(state1, state2)]", "x in nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap", "self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)] if (state1, state2) in gnfa_tf.keys():", "r4 = self.transition_function[(state1, state2)] new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3),", "start_state: State, accept_state: State ): self.transition_function = transition_function self.body_states =", "and most common punctuation and white space. Actually, that's not", "avoids a mypy bug. return empty.union( *[frozenset(get_successor(state, symbol)) for state", "of the set of strings of the form a concat", "def accepts(self, string: str) -> bool: \"\"\" Determines whether nfa", "'+', then converts the result back to a DFA. That", "new_self.accept_states | new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def", "self._good_domain(self.alphabet) def _good_accept(self) -> None: bad_accept_states = self.accept_states - self.states", "-> None: bad_accept_states = self.accept_states - self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept", "the transition function is not a subset of the power", "the power-set of the set of NFA states. For related", "star_accepts = self.accept_states | {star_start} return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts", "be the language recognised by dfa1, B be the language", "the transition function. 
Note that the empty string will not", "star symbol, and the tilde, for reasons that I will", "pair of a state and a symbol is in the", "other.start_state union_accept_states = { _stringify(item) for item in ( set(product(self.accept_states,", "the values of the transition function dictionary should be sets", "speakers of Scandinavian languages for the last one; I am", "chauvinism, but your letter is so very close to the", "for symbol in extra_symbols: for state in dfa1.states: new_tf[(state, symbol)]", "start_state=other.start_state, accept_states=other.accept_states ) combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self,", "= { prime(x) for x in nfa.transition_function[(state, symbol)] } copy_start", "same same language as the NFA instance. WARNING: The set", "alphabet: AbstractSet[Symbol] = ( set(printable) - {'(', ')', '|', '*'}", "someone who cares about this, I will change the symbol", "fit_empty(empty: Regex) -> NFA: tf: NfaTransitionFunction = { pair: set()", "{rip}, self.start_state, self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF", "char == ')': paren_count -= 1 elif char == '|':", "the default value is string.printable *minus* parentheses, the vertical bar,", "not have properly matching parentheses. \"\"\" operator_to_operation = { '|':", "MutableMapping, Optional, Set, Tuple, Union, cast ) from .base import", "the dfa's state-set and alphabet. The class will raise a", "if len(processed) > 0: processed += ( '•' if processed[-1]", "= MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA): \"\"\" A nondeterministic finite", "accepted by dfa. 
`dfa.encode()` returns a regex string that generates", "raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State], Regex] MutableGnfaTF = MutableMapping[Tuple[State,", "new_self = NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other = NFA(", "set, if it is. You can define epsilon-moves by using", "f\"Regex contains character '{char}' that is not in \" \"alphabet", "= 1 new_state = 'new_state1' while new_state in state_set: counter", "message will specify which of these six conditions things triggered", "a ValueError exception if any of the following conditions hold:", "(option-shift-2) is used to match the empty string (because it", "string in normal python regex syntax either, though it can", "language recognised by dfa1, and B be the language recognized", "state2)] = 'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def _good_range(self)", "{error_state} for symbol in union_alphabet: new_tf[(error_state, symbol)] = error_state for", "nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = { prime(x) for x in nfa.transition_function[(state,", "rip)] reduced_tf = {} for state1 in self.states - {self.accept_state,", "'•'' explicitly if you don't want to. But it gets", "Regex, alphabet: AbstractSet[Symbol] = ( set(printable) - {'(', ')', '|',", "bool: \"\"\" `my_dfa.accepts(\"some string\")` returns `True` if my_dfa accepts \"some", "hold: 1. the alphabet contains any of the verboten characters", "`•`, then `|`. 
This method uses a version of Dijkstra's", "returns `True` if my_dfa accepts \"some string\", and `False` otherwise.", "def _good_range(self) -> None: transition_range = set(self.transition_function.values()) bad_range = transition_range", "symbol)] if (state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|'", "return state + '`' def copy(nfa: NFA) -> NFA: copy_tf", "for (state_set, symbol) in product(state_sets, self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state,", "'*', '•', '€' and 'Ø'. The parentheses, vertical bar and", "simple but, sadly, computationally expensive algorith. For that reason, I", "returns an NFA that recognises the language defined by that", "is a valid value; in fact, you are required to", "OPERATORS and processed[-1] in {'|', '•'}: raise ValueError( \"Regex contains", "very close to the empty-set symbol. If, by some miracle,", "return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def star(self) -> \"NFA\":", "if extra_symbols: error_state = _get_new_state(dfa1.states) new_states = dfa1.states | {error_state}", "that this `+` operation is not commutative. \"\"\" new_self, new_other,", "symbol for empty-set.) In the absence of parentheses, the order", "`Ø`, 2. 
the input regex string contains a character not", "str) -> bool: \"\"\" `my_dfa.accepts(\"some string\")` returns `True` if my_dfa", "OPERATORS: raise ValueError(f\"Regex cannot start with '{first_char}'.\") processed = ''", "body_states self.start_state = start_state self.accept_state = accept_state self.states = (", "AbstractSet[State]] ] class _FSA(_Base): def __init__( self, *, transition_function: FsaTransitionFunction,", "Regex: first_char = regex[0] if first_char in OPERATORS: raise ValueError(f\"Regex", "one; I am very against English chauvinism, but your letter", "'{first_char}'.\") processed = '' paren_count = 0 for char in", "def __add__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A be", "return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string: str) -> bool: \"\"\"", "state-set and alphabet; the first elements of the tuples represent", "how to improve on average simplicity, eventually. \"\"\" gnfa =", "my_dfa accepts \"some string\", and `False` otherwise. Will raise a", "of the following are true: 1. the start state is", "= MutableMapping[Tuple[State, State], Regex] class _GNFA: def __init__( self, transition_function:", "(and hence the checks below will work as you would", "the transition function is missing cases -- i.e., it is", "transition \" \"function are not in the fsa's state set.\")", "of the transition \" \"function are not in the fsa's", "AbstractSet[State] (where States are strings and Symbols are one-char strings).", "make the return a set of frozensets). 
def powerset(iterable: Iterable)", "gnfa_tf[(gnfa_start, self.start_state)] = '€' for state in self.accept_states: gnfa_tf[(state, gnfa_accept)]", "for symbol in string: current_states = self._transition(current_states, symbol) return not", ") new_other = NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf =", "accept states is not a subset of the set of", "in {'|', '•'}: raise ValueError( \"Regex contains binary operator followed", "what you'd expect them to mean if you are familiar", "cares about this, I will change the symbol for empty-set.)", "overlap = self.states & other.states def add_empty_transitions( nfa1: NFA, nfa2:", "in product(state_sets, self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify(", "f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2 = self.transition_function[(rip, rip)] reduced_tf =", "a ValueError exception on instantiation if any of the following", "recipe, from # https://docs.python.org/3/library/itertools.html#recipes # (minor modification to make the", "1 elif char == '|': if paren_count == 0: return", "machine_stack.pop() OPERATORS = ['sentinel', '|', '•', '*'] PARENTHE = ['(',", "improve on average simplicity, eventually. \"\"\" gnfa = self._gnfize() while", "= self.transition_function star_tf[(star_start, '')] = {self.start_state} for symbol in self.alphabet:", "instance and returns an NFA instance. 
\"\"\" nd_transition_function = {", "cardinality of the power-set of the set of NFA states.", "= dfa1.transition_function new_states = dfa1.states extra_symbols = dfa2.alphabet - dfa1.alphabet", "> 0: processed += ( '•' if processed[-1] not in", "str Symbol = str Regex = str FsaTransitionFunction = Mapping[", "the order of operations is: `*`, then `•`, then `|`.", "new_self, new_other, union_tf = self._combine(other) union_start_state = _get_new_state(new_self.states | new_other.states)", "ValueError( \"Regex contains binary operator followed by an \" \"operator;", "new_other, combination_tf def _good_range(self) -> None: bad_range = { x", "self.transition_function[(rip, state2)] r4 = self.transition_function[(state1, state2)] new_regex = regex_union( regex_concat(regex_concat(r1,", "symbol)] = error_state return new_states, new_tf self_states, self_tf = maybe_add_state(self,", "if union_main_scope(regex2): regex2 = f'({regex2})' return regex1 + regex2 def", "in A with strings in B. This DFA operator is", "For reaons related to the above, the characters '(', ')',", "self._transition_function ) return self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State] = frozenset()", "if first_char in OPERATORS: raise ValueError(f\"Regex cannot start with '{first_char}'.\")", "is not in \" \"alphabet and not an accepted regex", "keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], State] - `start_state`: State", "`dfa1 + dfa2` returns a DFA that recognises the set", "+= 1 if char == ')': paren_count -= 1 if", "error_state return new_states, new_tf self_states, self_tf = maybe_add_state(self, other) other_states,", "regex = _pre_process(regex, alphabet) for char in regex: if char", "class. 
Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], State]", "return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap = self.states &", "self.start_state, self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF =", "frozenset(item) for item in chain.from_iterable( combinations(s, r) for r in", "the alphabet, and not one of the above veboten characters,", "# (minor modification to make the return a set of", "Tuple, Union, cast ) from .base import ( _Base, _extract_states_alphabet,", "copy_tf = {} for state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)]", "star_start = _get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start, '')] = {self.start_state}", "{ key: {value} for key, value in self.transition_function.items() } return", "')': paren_count -= 1 if paren_count < 0: raise ValueError(", "value is string.printable *minus* parentheses, the vertical bar, the star", "then `|`. This method uses a version of Dijkstra's shunting", "bad_set=bad_accept_states, message_singular=(\"Accept state {} is not a member of the", "of the transition function dictionary should be sets (or frozensets).", "set of states inferred from the transition function; 2. the", "_good_range(self): raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State], Regex] MutableGnfaTF =", "self.transition_function[(rip, rip)] reduced_tf = {} for state1 in self.states -", "cast( NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State]", "and build the NFA. The method will raise a ValueError", "= regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 ) reduced_tf[(state1, state2)] =", "the transition function is not a one-character string; 4. 
a", "message_plural=(\"States {} in the range of the transition \" \"function", "to the empty language with it. For reaons related to", "= prime(nfa.start_state) copy_accept = {prime(x) for x in nfa.accept_states} return", "self.states & other.states while overlap: other = copy(other) overlap =", "self.transition_function[(state1, symbol)] if (state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)] +=", "an element of A and b is an element of", "regex2 == '€': return regex1 if union_main_scope(regex1): regex1 = f'({regex1})'", "import product, chain, combinations from string import printable from typing", "th following are true: * the start state is not", "(self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF =", "accepts(self, string: str) -> bool: \"\"\" `my_dfa.accepts(\"some string\")` returns `True`", "if my_dfa accepts \"some string\", and `False` otherwise. Will raise", "States are strings, and Symbols are one-char strings.) The transition", "set is a valid value; in fact, you are required", "function is not a subset of the power set of", "be inferred to be a member of the alphabet (and", "MutableNfaTF]: def prime(state: State): return state + '`' def copy(nfa:", "very against English chauvinism, but your letter is so very", "concat_tf[(state, '')] = {new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states )", "NFA, nfa2: NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA,", ") combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf", "dictionary should be sets (or frozensets). The empty set is", "not in the alphabet, and not one of the above", "are one-char strings.) 
The transition function' keys implicitly define the", "= ['sentinel'] def binary_operate() -> None: right_operand = machine_stack.pop() left_operand", "= Mapping[Tuple[State, Symbol], State] class DFA(_FSA): \"\"\" A deterministic finite", "set --- i.e., the values of the transition function dictionary", "which includes the standard ASCII letters and digits, and most", "B. This DFA operator is parasitic on the NFA operator;", "match, for instance, {'', '0'} with the current syntax. (Quick", "'Ø'] NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES def _pre_process(regex:", "https://docs.python.org/3/library/itertools.html#recipes # (minor modification to make the return a set", "of all strings formed by concatenating any number of members", "states inferred from the transition function; 3. a member of", ") def _stringify(states: Iterable[State]) -> str: if not isinstance(states, collections.abc.Sequence):", "characters -- i.e.,`(` , `)`, `|`, `*`, `•`, `€` and", "necessary; maybe I'll figure out how to improve on average", "accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states = accept_states", "digits, and most common punctuation and white space. Actually, that's", "But it gets used internally. '€' (option-shift-2) is used to", "of Scandinavian languages for the last one; I am very", "that reason, I recommend you don't `+` dfas with large", "language accepted by dfa. 
`dfa.encode()` returns a regex string that", "'Ø' else {'q1'} return NFA( transition_function=tf, start_state='q1', accept_states=accept_states ) def", "normal python regex syntax either, though it can be done;", "\"function is not in the fsa's state set.\"), message_plural=(\"States {}", "transition_function: FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state", "that every pair of a state and a symbol is", "following conditions hold: 1. the alphabet contains any of the", "if paren_count == 0: return True return False def regex_star(regex:", "if any of the following conditions hold: 1. the alphabet", "| nfa2` returns an nfa that recognizes A union B.", "For related reasons, the time complexity of this method is", "not in alphabet | set(NOT_SYMBOLS): raise ValueError( f\"Regex contains character", "nfa that recognizes A concat B -- i.e., the language", "state in new_self.accept_states: if (state, '') in concat_tf: concat_tf[(state, '')].add(new_other.start_state)", "current_state = self.transition_function[(current_state, symbol)] return current_state in self.accept_states def encode(self)", "start_state=determinized_start, accept_states=determinized_accept ) def star(self) -> \"NFA\": \"\"\" Let A", "expressions. '•' (option-8 on a mac keyboard) means concatenation. You", "the input DFAs having different alphabets. \"\"\" union_alphabet = self.alphabet", "inferred from the transition function; 2. the set of accept", "return a set of frozensets). 
def powerset(iterable: Iterable) -> Set[FrozenSet]:", "symbol gnfa_start = _get_new_state(self.states) gnfa_accept = _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start,", "= self.start_state for symbol in string: current_state = self.transition_function[(current_state, symbol)]", "): self.transition_function = transition_function self.body_states = body_states self.start_state = start_state", "containing DFA and NFA public classes \"\"\" import collections.abc from", "The domain of the transition function is the power-set of", "== 'Ø' or regex2 == 'Ø': return 'Ø' if regex1", "states has the cardinality of the power-set of the set", "from the transition function; 3. a member of the alphabet", "raise ValueError(f\"Regex cannot start with '{first_char}'.\") processed = '' paren_count", "A with strings in B. This DFA operator is parasitic", "other = copy(other) overlap = self.states & other.states def add_empty_transitions(", "nfa2 is the cardinality of the state set of nfa1", "epsilon_neighbours - state_set: state_set = state_set | epsilon_neighbours epsilon_neighbours =", "\"\"\" Let A be the language recognised by dfa1, and", "rip)] for state2 in self.states - {self.start_state, rip}: r3 =", "one-character string; 4. a member of the transition function's range", "= str Symbol = str Regex = str FsaTransitionFunction =", "simpler than the standard python regular expresssions. All characters are", "conditions hold: 1. the alphabet contains any of the verboten", "the transition function is not a one-character string; * the", "the NFA. 
(My apologies to speakers of Scandinavian languages for", "OPERATORS = ['sentinel', '|', '•', '*'] PARENTHE = ['(', ')']", "as is usual; no need to write '•'' explicitly if", "def accepts(self, string: str) -> bool: \"\"\" `my_dfa.accepts(\"some string\")` returns", "of the alphabet inferred from the transition function is not", "message_singular=\"Alphabet cannot contain character {}.\", message_plural=\"Alphabet cannot contain characters {}.\"", "self.accept_states: determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf,", "reduced_tf = {} for state1 in self.states - {self.accept_state, rip}:", "instance. WARNING: The set of DFA states has the cardinality", "NFA.__or__, '•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot", "the state-set of nfa2 plus 1. There is no problem", "- {self.accept_state, rip}: r1 = self.transition_function[(state1, rip)] for state2 in", "Symbol) -> AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction, self._transition_function ) return", "alphabet (i.e., a set of one-character strings) as input; returns", "{self.start_state, rip}: r3 = self.transition_function[(rip, state2)] r4 = self.transition_function[(state1, state2)]", "+ nfa2` returns an nfa that recognizes A concat B", "the transition function. 
The exception message will specify which of", "empty string (because it kind of looks like an epsilon);", "the transition function; * a member of the alphabet inferred", ") machine_stack.append(machine) def compare(operator: Regex) -> int: return ( OPERATORS.index(operator)", "current_state = self.start_state for symbol in string: current_state = self.transition_function[(current_state,", "copy_tf[(prime(state), symbol)] = { prime(x) for x in nfa.transition_function[(state, symbol)]", "accept_state: State ): self.transition_function = transition_function self.body_states = body_states self.start_state", "self.accept_states def encode(self) -> Regex: \"\"\" Let A be the", "cardinality of the state set of nfa1 plus the cardinality", "} copy_start = prime(nfa.start_state) copy_accept = {prime(x) for x in", "the alphabet inferred from the transition function is not a", "state_set | epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol='' ) return", "alphabet, and not one of the above veboten characters, 3.", "\"printable\" characters, which includes the standard ASCII letters and digits,", "state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|' + symbol else:", "char == '|': if paren_count == 0: return True return", "string: current_state = self.transition_function[(current_state, symbol)] return current_state in self.accept_states def", "-> \"DFA\": \"\"\" Let A be the language recognised by", "In the absence of parentheses, the order of operations is:", "string.printable *minus* parentheses, the vertical bar, the star symbol, and", "if char in alphabet or char == '(': if len(processed)", "(because it kind of looks like an epsilon); there's no", "\"\"\" Convenience method that takes a DFA instance and returns", "\"\"\" Let A be the language accepted by dfa. 
`dfa.encode()`", "that the empty string will not be inferred to be", "state {} is not a member of the fsa's \"", "gnfa_tf[(state, gnfa_accept)] = '€' for state1, state2 in product( self.states", "defined by that regular expression and that alphabet. The alphabet", "[] operator_stack = ['sentinel'] def binary_operate() -> None: right_operand =", "instance, {'', '0'} with the current syntax. (Quick challenge: it's", "['(', ')'] EMPTIES = ['€', 'Ø'] NOT_SYMBOLS = OPERATORS +", "in self.accept_states: gnfa_tf[(state, gnfa_accept)] = '€' for state1, state2 in", "default value is string.printable -- i.e., the set of \"printable\"", "match to the empty language with it. For reaons related", "`)`, `|`, `*`, `•`, `€` and `Ø`, 2. the input", "while overlap: other = copy(other) overlap = self.states & other.states", "an accepted regex character.\" ) if char in OPERATORS and", "return regex1 if union_main_scope(regex1): regex1 = f'({regex1})' if union_main_scope(regex2): regex2", "\"Left parenthesis occurs in regex without matching right \" \"parenthesis.\"", "language recognized by nfa2. `nfa1 + nfa2` returns an nfa", "\" \"left parenthesis.\" ) processed += char if paren_count >", "other_states) union_transition_function = {} for (state1, state2), symbol in product(state_pairs,", "computationally expensive algorith. 
For that reason, I recommend you don't", "= self.transition_function[(rip, state2)] r4 = self.transition_function[(state1, state2)] new_regex = regex_union(", "current_states = self._add_epsilons({self.start_state}) for symbol in string: current_states = self._transition(current_states,", "= self._gnfize() while len(gnfa.states) > 2: gnfa = gnfa.reduce() return", "will raise a ValueError exception if any of the following", "state + '`' def copy(nfa: NFA) -> NFA: copy_tf =", "State, accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states =", "function is not a subset of the set of states", "x in nfa.transition_function[(state, symbol)] } copy_start = prime(nfa.start_state) copy_accept =", "set of all strings formed by concatenating any number of", "dfa1 and dfa2. There is no problem with the input", "language recognised by nfa1, and B be the language recognized", "def non_determinize(self) -> NFA: \"\"\" Convenience method that takes a", "input DFAs into NFAs, uses the NFA '+', then converts", "_GNFA( reduced_tf, self.body_states - {rip}, self.start_state, self.accept_state ) NfaTransitionFunction =", "message_singular=(\"Value {} in the range of the transition \" \"function", "self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) -> None: bad_accept_states = self.accept_states -", "first elements of the tuples represent the nfa's states, and", "AbstractSet[Symbol] = ( set(printable) - {'(', ')', '|', '*'} )", "-> bool: \"\"\" `my_dfa.accepts(\"some string\")` returns `True` if my_dfa accepts", "for state in self.accept_states: star_tf[(state, '')] = {self.start_state} star_accepts =", "write '•'' explicitly if you don't want to. 
But it", "string import printable from typing import ( AbstractSet, Container, FrozenSet,", "transition_function=transition_function, start_state=start_state ) self._accept_states = accept_states self._states, self._alphabet = _extract_states_alphabet(", "['sentinel'] def binary_operate() -> None: right_operand = machine_stack.pop() left_operand =", "not in the fsa's state set.\") ) def _get_successors( self,", "NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State], Regex] MutableGnfaTF = MutableMapping[Tuple[State, State],", "transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states: Iterable[State]) -> str: if", "maybe_add_state( dfa1: DFA, dfa2: DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf", "the language consisting of the set of strings of the", "raise a ValueError exception on instantiation if any of the", "mean what you'd expect them to mean if you are", "transition function; 3. a member of the alphabet inferred from", "== \"Ø\": return regex1 return f\"{regex1}|{regex2}\" rip = self.body_states.pop() r2", "characters are intepreted as literals for symbols in the alphabet", "= machine_stack[-1].star() elif char in OPERATORS: if operator_stack[-1] in PARENTHE", "change the symbol for empty-set.) 
In the absence of parentheses,", "NFA operator; it converts the input DFAs into NFAs, uses", "self.states | {gnfa_accept} ): if (state1, state2) not in gnfa_tf:", "= add_empty_transitions(self, other) new_self = NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states )", "{} for state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = {", "\"\"\" _check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for symbol in string:", "if set(state_set) & self.accept_states: determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state}) )", "EMPTIES: machine_stack.append(fit_empty(char)) elif char in alphabet: machine_stack.append(fit_symbol(char)) elif char ==", "states from dfa1 and dfa2. There is no problem with", "ValueError exception on instantiation if any of the following are", "start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states: Iterable[State]) -> str: if not", "set for a given state-symbol pair is the empty set,", "function's range is not a set; 5. the range of", "NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states: Iterable[State]) -> str:", "'`' def copy(nfa: NFA) -> NFA: copy_tf = {} for", "by dfa1, B be the language recognised by dfa2. `dfa1", "_add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set, symbol=''", "def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex: first_char = regex[0]", "are the source of the problem. 
\"\"\" def __or__(self, other:", "of states inferred from the transition function; * a member", "other_tf = add_empty_transitions(self, other) new_self = NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states", "an \" \"operator; not cool.\" ) if char == '(':", "set of states inferred from the transition function; * the", "a case -- i.e., it is not the case that", "The states of dfa1 | dfa2 are ordered pairs of", "binary operator followed by an operator, or 4. the input", "is very simple -- much simpler than the standard python", "Dijkstra's shunting yard algorithm to parse the regex and build", "one of the above veboten characters, 3. the input regex", "to write '•'' explicitly if you don't want to. But", "isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value {} in the range", "Symbol ) -> FrozenSet[State]: def get_successor(state: State, sym: Symbol) ->", "gnfa_accept)] = '€' for state1, state2 in product( self.states |", "a set of frozensets). def powerset(iterable: Iterable) -> Set[FrozenSet]: s", "& other.states def add_empty_transitions( nfa1: NFA, nfa2: NFA ) ->", "cannot be symbols in the alphabet of the NFA. (My", "1: binary_operate() return machine_stack.pop() OPERATORS = ['sentinel', '|', '•', '*']", "def regex_star(regex: Regex) -> Regex: if regex in EMPTIES: return", "the alphabet. The domain of the transition function is the", "NFA( transition_function=tf, start_state='q1', accept_states=accept_states ) def fit_symbol(symbol: Symbol) -> NFA:", "state-set of nfa2 plus 1. 
There is no problem with", "regex2 def regex_union(regex1: Regex, regex2: Regex) -> Regex: if regex1", "self.transition_function[(state1, rip)] for state2 in self.states - {self.start_state, rip}: r3", "for x in self.transition_function.values() if not isinstance(x, collections.abc.Set) } _error_message(", "nfa2` returns an nfa that recognizes A concat B --", "new_self, new_other, concat_tf = self._combine(other) for state in new_self.accept_states: if", "i.e., the set of \"printable\" characters, which includes the standard", "that alphabet. The alphabet parameter is optional; it's default value", "= self.alphabet | other.alphabet def maybe_add_state( dfa1: DFA, dfa2: DFA", "self, *, transition_function: FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State] ): super().__init__(", "operator_stack[-1] in PARENTHE or compare(char) > 0: operator_stack.append(char) else: while", "numbers of states. \"\"\" return (self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self)", "operator_stack.pop() while len(operator_stack) > 1: binary_operate() return machine_stack.pop() OPERATORS =", "fsa's \" \"state set.\") ) def _good_range(self): raise NotImplementedError GnfaTransitionFunction", "= { new_self.start_state, new_other.start_state } for symbol in new_self.alphabet |", "the nfa's states, and the second elements are the symbols", "regex2 = f'({regex2})' return regex1 + regex2 def regex_union(regex1: Regex,", "for state in state_set] ) def _add_epsilons(self, state_set: AbstractSet[State]) ->", "code an itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes # (minor modification", "from the transition function; * the set of accept states", "now, the syntax of the regular expressions that this method", "of the verboten characters -- i.e.,`(` , `)`, `|`, `*`,", "\"function are not in the fsa's state set.\") ) def", "symbol) ) if set(state_set) & self.accept_states: 
determinized_accept.add(determinzed_state) determinized_start = _stringify(", "number of states of the NFA. Don't determinize big NFAs.", "if not isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value {} in", "1 if char == ')': paren_count -= 1 if paren_count", "nondeterministic finite automaton class. Takes three keyword arguments: - `transition_function`:", "the empty set; you can match to the empty language", "this `+` operation is not commutative. \"\"\" new_self, new_other, concat_tf", "fsa's state set.\") ) def accepts(self, string: str) -> bool:", "pair in product({'q1', 'q2'}, alphabet) } tf[('q1', symbol)] = {'q2'}", "in extra_symbols: for state in dfa1.states: new_tf[(state, symbol)] = error_state", "Regex, regex2: Regex) -> Regex: if regex1 == \"Ø\": return", "{'q1'} return NFA( transition_function=tf, start_state='q1', accept_states=accept_states ) def fit_symbol(symbol: Symbol)", "empty set; you can match to the empty language with", ") -> FrozenSet[State]: def get_successor(state: State, sym: Symbol) -> AbstractSet[State]:", "function dictionary should be sets (or frozensets). The empty set", "`•`, `€` and `Ø`, 2. the input regex string contains", "_well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self)", "= set(self.transition_function.values()) bad_range = transition_range - self.states _error_message( bad_set=bad_range, message_singular=(\"State", "`+` dfas with large numbers of states. \"\"\" return (self.non_determinize()", "machine_stack.append(fit_symbol(char)) elif char == '*': machine_stack[-1] = machine_stack[-1].star() elif char", "transition function; 2. 
the set of accept states is not", "other way to match, for instance, {'', '0'} with the", "sym: Symbol) -> AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction, self._transition_function )", "{}.\", message_plural=\"Alphabet cannot contain characters {}.\" ) def fit_empty(empty: Regex)", "below will work as you would expect). The class will", "if char in OPERATORS and processed[-1] in {'|', '•'}: raise", "and alphabet; the first elements of the tuples represent the", "is not in the fsa's state set.\"), message_plural=(\"States {} in", ") regex = _pre_process(regex, alphabet) for char in regex: if", "other_tf = maybe_add_state(other, self) state_pairs = product(self_states, other_states) union_transition_function =", "in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)] = set() union_accept_states =", "key, value in self.transition_function.items() } return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states", "a member of the alphabet (and hence the checks below", "other.states def add_empty_transitions( nfa1: NFA, nfa2: NFA ) -> Tuple[NfaTransitionFunction,", "in product( self.states | {gnfa_start}, self.states | {gnfa_accept} ): if", "char in alphabet: machine_stack.append(fit_symbol(char)) elif char == '*': machine_stack[-1] =", ") union_start_state = self.start_state + other.start_state union_accept_states = { _stringify(item)", "things triggered the exception, and which states/symbols are the source", "1. the start state is not a member of the", "-> \"NFA\": \"\"\" Let A be the language recognised by", "the language recognised by nfa. `nfa.self()` returns an nfa that", "by nfa2. 
`nfa1 + nfa2` returns an nfa that recognizes", "\"NFA\": \"\"\" Let A be the language recognised by nfa1,", "prime(x) for x in nfa.transition_function[(state, symbol)] } copy_start = prime(nfa.start_state)", "makes for a relatively simple but, sadly, computationally expensive algorith.", "represents the empty set; you can match to the empty", "string contains symbols that aren't in the DFA's alphabet. \"\"\"", "a one-character string; * the transition function is missing a", "FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set, symbol='' ) while epsilon_neighbours -", "Output a GNFA equivalent to `self` with one less state", "new_self, new_other, combination_tf def _good_range(self) -> None: bad_range = {", "accepted regex character.\" ) if char in OPERATORS and processed[-1]", "string. Will raise a ValueError exception is the string contains", "{} for (state1, state2), symbol in product(state_pairs, union_alphabet): union_transition_function[(state1 +", "State, sym: Symbol) -> AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction, self._transition_function", "return regex2 if regex2 == \"Ø\": return regex1 return f\"{regex1}|{regex2}\"", "\"\"\" return (self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self) -> _GNFA: gnfa_tf:", "= { '|': NFA.__or__, '•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) &", "transition_function: GnfaTransitionFunction, body_states: Set[State], start_state: State, accept_state: State ): self.transition_function", "= {} for state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] =", "concat B -- i.e., the language consisting of the set", "characters '(', ')', '|', '*', '•', '€' and 'Ø' cannot", "@property def accept_states(self) -> AbstractSet[State]: return self._accept_states def _well_defined(self) ->", "= {} for state1 in self.states - {self.accept_state, rip}: r1", "-- i.e., the set of all strings formed by 
concatenating", "states of dfa1 | dfa2 are ordered pairs of states", "it's not totally obvious how to match the empty string", "members of the fsa's \" \"state set.\") ) def _good_range(self):", "5. the range of the transition function is not a", "NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function)", "set(NOT_SYMBOLS): raise ValueError( f\"Regex contains character '{char}' that is not", "): if (state1, state2) not in gnfa_tf: gnfa_tf[(state1, state2)] =", "to match the empty string in normal python regex syntax", "familiar with regular expressions. '•' (option-8 on a mac keyboard)", "any of th following are true: * the start state", "that generates A. That regex string is liable to be", "_get_new_state(self.states) gnfa_accept = _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€'", "operator, or 4. the input regex does not have properly", "properly matching parentheses. \"\"\" operator_to_operation = { '|': NFA.__or__, '•':", "def powerset(iterable: Iterable) -> Set[FrozenSet]: s = list(iterable) return {", "range(len(s)+1) ) } state_sets = powerset(self.states) determinized_tf = {} determinized_accept", ") return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def star(self) ->", "are not in the fsa's state set.\") ) def accepts(self,", "FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state )", "set of \"printable\" characters, which includes the standard ASCII letters", "exception, and which states/symbols are the source of the problem.", "if (state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|' +", "and `False` otherwise. 
Will raise a ValueError exception is the", "it can be done; give it a go.) 'Ø' (option-shift-o)", "the input NFAs having different alphabets. \"\"\" new_self, new_other, union_tf", "for the last one; I am very against English chauvinism,", "'' paren_count = 0 for char in regex: if char", "ValueError exception if any of the following conditions hold: 1.", "message_plural=(\"Accept states {} are not members of the fsa's \"", "-> NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols = nfa2.alphabet - nfa1.alphabet", "-> Set[FrozenSet]: s = list(iterable) return { frozenset(item) for item", "return True return False def regex_star(regex: Regex) -> Regex: if", "consisting of the set of strings of the form a", "nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap = self.states", "of states inferred from the transition function; 6. the transition", "- nfa1.alphabet if extra_symbols: for pair in product(nfa1.states, extra_symbols): new_tf[pair]", "self.transition_function[(state1, state2)] new_regex = regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 )", "operator followed by an operator, or 4. the input regex", "accept_states=copy_accept ) overlap = self.states & other.states while overlap: other", "are strings and Symbols are one-char strings). The keys of", "Don't determinize big NFAs. \"\"\" # powerset code an itertools", "match the empty string in normal python regex syntax either,", "the state-set of nfa1 | nfa2 is the cardinality of", "# This avoids a mypy bug. return empty.union( *[frozenset(get_successor(state, symbol))", "you would expect). 
The class will raise a ValueError exception", "kind of looks like an epsilon); there's no other way", "_error_message( bad_set=transition_range - self.states, message_singular=(\"State {} in the range of", "regex1 == \"Ø\": return regex2 if regex2 == \"Ø\": return", "The cardinality of the state-set of nfa1 | nfa2 is", "it converts the input DFAs into NFAs, uses the NFA", "epsilon); there's no other way to match, for instance, {'',", "\"NFA\") -> \"NFA\": \"\"\" Let A be the language recognised", "in self.alphabet: star_tf[(star_start, symbol)] = set() for state in self.accept_states:", "def add_empty_transitions( nfa1: NFA, nfa2: NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]:", "Regex) -> NFA: tf: NfaTransitionFunction = { pair: set() for", "transition function is not a one-character string; * the transition", "binary operator followed by an \" \"operator; not cool.\" )", "char == '(': paren_count += 1 if char == ')':", "new_tf self_states, self_tf = maybe_add_state(self, other) other_states, other_tf = maybe_add_state(other,", "= '' paren_count = 0 for char in regex: if", "__or__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A be the", "this, I will change the symbol for empty-set.) In the", "for state in dfa1.states: new_tf[(state, symbol)] = error_state return new_states,", "= ['€', 'Ø'] NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES", "an element of B. Note that this `+` operation is", "the empty-set symbol. If, by some miracle, there is someone", "work as you would expect). The class will raise a", "== ')': paren_count -= 1 if paren_count < 0: raise", "return (self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF", "of Dijkstra's shunting yard algorithm to parse the regex and", "\"\"\" import collections.abc from itertools import product, chain, combinations from", "{self.accept_state, rip}: r1 = self.transition_function[(state1, rip)] for state2 in self.states", "NFA. 
Don't determinize big NFAs. \"\"\" # powerset code an", "the input regex does not have properly matching parentheses. \"\"\"", "(option-8 on a mac keyboard) means concatenation. You can leave", "is liable to be much more complicated than necessary; maybe", "by dfa. `dfa.encode()` returns a regex string that generates A.", "return regex1 + regex2 def regex_union(regex1: Regex, regex2: Regex) ->", "s = list(iterable) return { frozenset(item) for item in chain.from_iterable(", "converts the input DFAs into NFAs, uses the NFA '+',", ") self._well_defined() @property def alphabet(self) -> FrozenSet[Symbol]: return self._alphabet @property", "self.states - {self.accept_state, rip}: r1 = self.transition_function[(state1, rip)] for state2", "in the alphabet. The domain of the transition function is", "the number of states of the NFA. Don't determinize big", "MutableGnfaTF = MutableMapping[Tuple[State, State], Regex] class _GNFA: def __init__( self,", "to match the empty string (because it kind of looks", "0 for char in regex: if char == '(': paren_count", "the symbol for empty-set.) In the absence of parentheses, the", "in product(nfa1.states, extra_symbols): new_tf[pair] = set() return new_tf return add_one_way(nfa1,", "return regex2 if regex2 == '€': return regex1 if union_main_scope(regex1):", "is not commutative. \"\"\" new_self, new_other, concat_tf = self._combine(other) for", "alphabet: AbstractSet[Symbol]) -> Regex: first_char = regex[0] if first_char in", "+= char if paren_count > 0: raise ValueError( \"Left parenthesis", "set of states inferred from the transition function; 6. the", "transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self, other: \"NFA\") -> Tuple[\"NFA\",", "'(', '')', '|', '*', '•', '€' and 'Ø'. The parentheses,", "2. 
the input regex string contains a character not in", "hence the checks below will work as you would expect).", "counter = 1 new_state = 'new_state1' while new_state in state_set:", "in the DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state = self.start_state", "self_tf, other_tf = add_empty_transitions(self, other) new_self = NFA( transition_function=self_tf, start_state=self.start_state,", "python regex syntax either, though it can be done; give", "(My apologies to speakers of Scandinavian languages for the last", "return NFA( transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA] = []", "matching right \" \"parenthesis.\" ) return processed DfaTransitionFunction = Mapping[Tuple[State,", "different alphabets. \"\"\" union_alphabet = self.alphabet | other.alphabet def maybe_add_state(", "{'(', '|'} else '' ) if char not in alphabet", "def determinize(self) -> \"DFA\": \"\"\"Returns a DFA that recognizes the", "expressions that this method takes as input is very simple", "That makes for a relatively simple but, sadly, computationally expensive", "_stringify( self._transition(state_set, symbol) ) if set(state_set) & self.accept_states: determinized_accept.add(determinzed_state) determinized_start", "# https://docs.python.org/3/library/itertools.html#recipes # (minor modification to make the return a", "prime(nfa.start_state) copy_accept = {prime(x) for x in nfa.accept_states} return NFA(", "the language recognized by nfa2. `nfa1 + nfa2` returns an", "state2, symbol)] = ( self_tf[(state1, symbol)] + other_tf[(state2, symbol)] )", "that recognizes A concat B -- i.e., the language consisting", "\"state set.\"), message_plural=(\"Accept states {} are not members of the", "alphabet of the NFA. 
(My apologies to speakers of Scandinavian", "if not isinstance(states, collections.abc.Sequence): states = list(states) states.sort() return \"\".join(states)", "{'q2'} return NFA( transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack: List[NFA] =", ") transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range -", "__add__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A be the", "): super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states = accept_states self._states, self._alphabet", "a concat b, where a is an element of A", "= { pair: set() for pair in product({'q1', 'q2'}, alphabet)", "star(self) -> \"NFA\": \"\"\" Let A be the language recognised", "states = list(states) states.sort() return \"\".join(states) def _get_new_state(state_set: Container) ->", "and white space. Actually, that's not quite right -- the", "nfa's state-set and alphabet; the first elements of the tuples", "in ( set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states)) ) } return", "with the current syntax. (Quick challenge: it's not totally obvious", ") } state_sets = powerset(self.states) determinized_tf = {} determinized_accept =", "symbol) in product(state_sets, self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)] =", "= ['sentinel', '|', '•', '*'] PARENTHE = ['(', ')'] EMPTIES", "return empty.union( *[frozenset(get_successor(state, symbol)) for state in state_set] ) def", "= nfa1.transition_function extra_symbols = nfa2.alphabet - nfa1.alphabet if extra_symbols: for", "in the fsa's state set.\"), message_plural=(\"States {} in the range", "-> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction:", "the source of the problem. 
\"\"\" def __or__(self, other: \"DFA\")", "and B be the language recognized by nfa2. `nfa1 +", "is not a subset of the power set of states", "= self._add_epsilons({self.start_state}) for symbol in string: current_states = self._transition(current_states, symbol)", "converts the result back to a DFA. That makes for", "alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state}) for symbol in", "set of one-character strings) as input; returns an NFA that", "super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states = accept_states self._states, self._alphabet =", "regex syntax either, though it can be done; give it", "the cardinality of the power-set of the set of NFA", "a character not in the alphabet, and not one of", "symbol)] ) union_start_state = self.start_state + other.start_state union_accept_states = {", "you don't want to. But it gets used internally. '€'", "| {star_start} return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod def", "= { x for x in self.transition_function.values() if not isinstance(x,", "as literals for symbols in the alphabet except for '(',", "regular expression and an alphabet (i.e., a set of one-character", "= _extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property def alphabet(self) -> FrozenSet[Symbol]:", "in place of an alphabet symbol in the transition function.", "state set --- i.e., the values of the transition function", "is: `*`, then `•`, then `|`. 
This method uses a", "there is someone who cares about this, I will change", "recognises the set of all concatenations of strings in A", "in fact, you are required to specify that the successor", "an NFA that recognises the language defined by that regular", "self._alphabet = _extract_states_alphabet( self._transition_function.keys() ) self._well_defined() @property def alphabet(self) ->", "__or__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A be the", "_get_new_state(state_set: Container) -> State: counter = 1 new_state = 'new_state1'", "of the fsa's \" \"state set.\") ) def _good_range(self): raise", "start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self, other: \"NFA\") -> Tuple[\"NFA\", \"NFA\",", "the fsa's state set.\"), message_plural=(\"States {} in the range of", "state1, symbol in self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)] if (state1,", "= 0 for char in regex: if char == '(':", "of the problem. \"\"\" def __or__(self, other: \"NFA\") -> \"NFA\":", "state2)] = new_regex return _GNFA( reduced_tf, self.body_states - {rip}, self.start_state,", "+ symbol else: gnfa_tf[(state1, state2)] = symbol gnfa_start = _get_new_state(self.states)", "symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = { prime(x) for x", "= set() for (state_set, symbol) in product(state_sets, self._alphabet): determinzed_state =", "DfaTransitionFunction]: new_tf = dfa1.transition_function new_states = dfa1.states extra_symbols = dfa2.alphabet", "not current_states & self.accept_states == set() def determinize(self) -> \"DFA\":", "frozenset()) empty: FrozenSet[State] = frozenset() # This avoids a mypy", "very simple -- much simpler than the standard python regular", "A be the language recognised by nfa. 
`nfa.self()` returns an", "DfaTransitionFunction = Mapping[Tuple[State, Symbol], State] class DFA(_FSA): \"\"\" A deterministic", "-> AbstractSet[State]: return self._accept_states def _well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet,", "state-set and alphabet. The class will raise a ValueError exception", "')': paren_count -= 1 elif char == '|': if paren_count", "processed DfaTransitionFunction = Mapping[Tuple[State, Symbol], State] class DFA(_FSA): \"\"\" A", "transition function is the power-set of the nfa's state set", "-> Regex: if regex in EMPTIES: return '€' if len(regex)", "return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf = add_empty_transitions(self, other)", "'€' for state1, state2 in product( self.states | {gnfa_start}, self.states", "(Where States are strings, and Symbols are one-char strings.) The", "transition function. The exception message will specify which of these", "'•'}: raise ValueError( \"Regex contains binary operator followed by an", "machine_stack.append(machine) def compare(operator: Regex) -> int: return ( OPERATORS.index(operator) -", "(option-shift-o) represents the empty set; you can match to the", "if operator_stack[-1] in PARENTHE or compare(char) > 0: operator_stack.append(char) else:", "DFA, dfa2: DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function", "in regex: if char in alphabet or char == '(':", "that recognizes A union B. The cardinality of the state-set", "_stringify(states: Iterable[State]) -> str: if not isinstance(states, collections.abc.Sequence): states =", "= 'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def _good_range(self) ->", "cardinality of the state-set of nfa1 | nfa2 is the", "leave concatentation implicit, as is usual; no need to write", "for '(', '')', '|', '*', '•', '€' and 'Ø'. 
The", "+ PARENTHE + EMPTIES def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) ->", "of members of A. \"\"\" star_start = _get_new_state(self.states) star_tf =", "start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"DFA\") -> \"DFA\": \"\"\"", "much simpler than the standard python regular expresssions. All characters", "accept_states=union_accept_states ) def __add__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let", "accept_states=self.accept_states ) def _stringify(states: Iterable[State]) -> str: if not isinstance(states,", "- `transition_function`: Mapping[Tuple[State, Symbol], State] - `start_state`: State - `accept_state`:", "There is no problem with the input DFAs having different", "(state1, state2) not in gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø' return", "it is not the case that every pair of a", "contains binary operator followed by an \" \"operator; not cool.\"", "this method is exponential in the number of states of", "veboten characters, 3. the input regex contain a binary operator", "else {'q1'} return NFA( transition_function=tf, start_state='q1', accept_states=accept_states ) def fit_symbol(symbol:", "set() for state in self.accept_states: star_tf[(state, '')] = {self.start_state} star_accepts", "A nondeterministic finite automaton class. Takes three keyword arguments: -", "method uses a version of Dijkstra's shunting yard algorithm to", "will work as you would expect). The class will raise", "states inferred from the transition function; 2. 
the set of", "are intepreted as literals for symbols in the alphabet except", "is missing cases -- i.e., it is not the case", "{star_start} return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod def fit(", "import printable from typing import ( AbstractSet, Container, FrozenSet, Iterable,", "return \"\".join(states) def _get_new_state(state_set: Container) -> State: counter = 1", "transition function is not a subset of the set of", "this method takes as input is very simple -- much", "yard algorithm to parse the regex and build the NFA.", "eventually. \"\"\" gnfa = self._gnfize() while len(gnfa.states) > 2: gnfa", "raise ValueError( \"Regex contains binary operator followed by an \"", "a given state-symbol pair is the empty set, if it", "source of the problem. \"\"\" def __or__(self, other: \"NFA\") ->", "= new_self.accept_states | new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states )", "is someone who cares about this, I will change the", ", `)`, `|`, `*`, `•`, `€` and `Ø`, 2. the", "of the state-set of nfa2 plus 1. There is no", "language recognised by dfa1, B be the language recognised by", "specify that the successor set for a given state-symbol pair", "= NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf = {} combination_tf.update(new_self.transition_function)", ") reduced_tf[(state1, state2)] = new_regex return _GNFA( reduced_tf, self.body_states -", "start_state=star_start, accept_states=star_accepts ) @staticmethod def fit( regex: Regex, alphabet: AbstractSet[Symbol]", "regular expressions that this method takes as input is very", "python regular expresssions. All characters are intepreted as literals for", "operator_stack = ['sentinel'] def binary_operate() -> None: right_operand = machine_stack.pop()", "parentheses. 
\"\"\" operator_to_operation = { '|': NFA.__or__, '•': NFA.__add__ }", "def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction: new_tf = nfa1.transition_function", "the transition function; 2. the set of accept states is", "( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex, alphabet) for", "'€': return regex2 if regex2 == '€': return regex1 if", "-- i.e., the language consisting of the set of strings", "dfa1 | dfa2 are ordered pairs of states from dfa1", "is used to match the empty string (because it kind", "regex1 if union_main_scope(regex1): regex1 = f'({regex1})' if union_main_scope(regex2): regex2 =", ") def reduce(self) -> \"_GNFA\": \"\"\" Output a GNFA equivalent", "the set of NFA states. For related reasons, the time", "a relatively simple but, sadly, computationally expensive algorith. For that", "NFA: tf: MutableNfaTF = { pair: set() for pair in", "self._combine(other) for state in new_self.accept_states: if (state, '') in concat_tf:", "nfa1: NFA, nfa2: NFA ) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]: def add_one_way(nfa1:", "relatively simple but, sadly, computationally expensive algorith. For that reason,", "start_state: State, accept_states: AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states", "bad_range = transition_range - self.states _error_message( bad_set=bad_range, message_singular=(\"State {} in", ") return self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State] = frozenset() #", "1 new_state = 'new_state1' while new_state in state_set: counter +=", "of states of the NFA. Don't determinize big NFAs. \"\"\"", "with the input DFAs having different alphabets. 
\"\"\" union_alphabet =", "self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string: str) -> bool: \"\"\" Determines", "['sentinel', '|', '•', '*'] PARENTHE = ['(', ')'] EMPTIES =", "new_self.start_state, new_other.start_state } for symbol in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state,", "\"function is not a set.\"), message_plural=(\"Values {} in the range", "combinations(s, r) for r in range(len(s)+1) ) } state_sets =", "and not an accepted regex character.\" ) if char in", "+ other_tf[(state2, symbol)] ) union_start_state = self.start_state + other.start_state union_accept_states", "figure out how to improve on average simplicity, eventually. \"\"\"", "of NFA states. For related reasons, the time complexity of", "= 0 for char in regex: if char in alphabet", "specify which of these above conditions things triggered the exception,", "+ '*' return f\"({regex})*\" def regex_concat(regex1: Regex, regex2: Regex) ->", "member of the alphabet inferred from the transition function is", "the result back to a DFA. That makes for a", "second elements are the symbols in the alphabet. 
The domain", "\"\"\" `my_dfa.accepts(\"some string\")` returns `True` if my_dfa accepts \"some string\",", "AbstractSet[State] ): super().__init__( transition_function=transition_function, start_state=start_state ) self._accept_states = accept_states self._states,", "range of the transition \" \"function is not a set.\"),", "binary_operate() operator_stack.pop() while len(operator_stack) > 1: binary_operate() return machine_stack.pop() OPERATORS", "AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set, symbol='' ) while", "\"\"\" def __or__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A", "Tuple[State, Symbol], Union[State, AbstractSet[State]] ] class _FSA(_Base): def __init__( self,", "| {self.start_state} | {self.accept_state} ) def reduce(self) -> \"_GNFA\": \"\"\"", "no need to write '•'' explicitly if you don't want", "(or frozensets). The empty set is a valid value; in", "NFA) -> NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols = nfa2.alphabet -", "def __or__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A be", "symbol)] = { prime(x) for x in nfa.transition_function[(state, symbol)] }", "symbol)] = {'q2'} return NFA( transition_function=tf, start_state='q1', accept_states={'q2'} ) machine_stack:", "new_tf[pair] = set() return new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1)", "GNFA equivalent to `self` with one less state in it.", "i.e., the values of the transition function dictionary should be", "gets used internally. 
'€' (option-shift-2) is used to match the", "NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"NFA\") ->", "= maybe_add_state(other, self) state_pairs = product(self_states, other_states) union_transition_function = {}", "AbstractSet[State] (Where States are strings, and Symbols are one-char strings.)", "if regex1 == 'Ø' or regex2 == 'Ø': return 'Ø'", "above, the characters '(', ')', '|', '*', '•', '€' and", "for pair in product(nfa1.states, extra_symbols): new_tf[pair] = set() return new_tf", "the language accepted by dfa. `dfa.encode()` returns a regex string", "while len(gnfa.states) > 2: gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)]", "is not a one-character string; * the transition function is", "classes \"\"\" import collections.abc from itertools import product, chain, combinations", "symbol, and the tilde, for reasons that I will explain", "'*'] PARENTHE = ['(', ')'] EMPTIES = ['€', 'Ø'] NOT_SYMBOLS", "that aren't in the DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state", "be the language recognized by nfa2. `nfa1 | nfa2` returns", "recognized by dfa2. `dfa1 | dfa2` returns a dfa that", "= transition_range - self.states _error_message( bad_set=bad_range, message_singular=(\"State {} in the", "{ pair: set() for pair in product({'q1'}, alphabet) } accept_states", "with regular expressions. 
'•' (option-8 on a mac keyboard) means", "NFA '+', then converts the result back to a DFA.", "_Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input ) State = str Symbol", "ASCII letters and digits, and most common punctuation and white", "bad_accept_states = self.accept_states - self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {}", "product(self_states, other_states) union_transition_function = {} for (state1, state2), symbol in", "a DFA that recognizes the same same language as the", "{} are not members of the fsa's \" \"state set.\")", "A union B. The cardinality of the state-set of nfa1", "is the power-set of the nfa's state set --- i.e.,", "build the NFA. The method will raise a ValueError exception", "')', '|', '*', '•', '€' and 'Ø' cannot be symbols", "parenthesis.\" ) processed += char if paren_count > 0: raise", "the set of all strings formed by concatenating any number", "input regex contain a binary operator followed by an operator,", "language consisting of the set of strings of the form", "for state2 in self.states - {self.start_state, rip}: r3 = self.transition_function[(rip,", "( operator_stack[-1] not in PARENTHE and compare(char) <= 0 ):", "alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state = self.start_state for symbol in", "to make the return a set of frozensets). def powerset(iterable:", "of an alphabet symbol in the transition function. Note that", "_stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set, symbol) ) if set(state_set)", "for reasons that I will explain presently. As of now,", "If, by some miracle, there is someone who cares about", "union_alphabet = self.alphabet | other.alphabet def maybe_add_state( dfa1: DFA, dfa2:", "== 1: return regex + '*' return f\"({regex})*\" def regex_concat(regex1:", "domain of the transition function. 
The exception message will specify", "fit_symbol(symbol: Symbol) -> NFA: tf: MutableNfaTF = { pair: set()", "NfaTransitionFunction = { pair: set() for pair in product({'q1'}, alphabet)", "Regex, regex2: Regex) -> Regex: if regex1 == 'Ø' or", "The method will raise a ValueError exception if any of", "| {gnfa_accept} ): if (state1, state2) not in gnfa_tf: gnfa_tf[(state1,", "pair: set() for pair in product({'q1', 'q2'}, alphabet) } tf[('q1',", "list(states) states.sort() return \"\".join(states) def _get_new_state(state_set: Container) -> State: counter", "`€` and `Ø`, 2. the input regex string contains a", "State], Regex] MutableGnfaTF = MutableMapping[Tuple[State, State], Regex] class _GNFA: def", "compare(operator: Regex) -> int: return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) )", "regex1 == 'Ø' or regex2 == 'Ø': return 'Ø' if", "state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = { prime(x) for", "will specify which of these six conditions things triggered the", "nfa.transition_function[(state, symbol)] } copy_start = prime(nfa.start_state) copy_accept = {prime(x) for", "of the fsa's \" \"state set.\"), message_plural=(\"Accept states {} are", "NFA states. For related reasons, the time complexity of this", "\"\".join(states) def _get_new_state(state_set: Container) -> State: counter = 1 new_state", "nfa's state set --- i.e., the values of the transition", "of the transition function's range is not a set; 5.", "language recognized by nfa2. 
`nfa1 | nfa2` returns an nfa", "is optional; it's default value is string.printable -- i.e., the", "operator_to_operation = { '|': NFA.__or__, '•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS)", "is not a subset of the set of states inferred", "'q2'}, alphabet) } tf[('q1', symbol)] = {'q2'} return NFA( transition_function=tf,", "new_other.alphabet: union_tf[(union_start_state, symbol)] = set() union_accept_states = new_self.accept_states | new_other.accept_states", "alphabet: machine_stack.append(fit_symbol(char)) elif char == '*': machine_stack[-1] = machine_stack[-1].star() elif", "returns a dfa that recognizes A union B. The states", "def regex_union(regex1: Regex, regex2: Regex) -> Regex: if regex1 ==", "alphabet parameter is optional; it's default value is string.printable --", "'*'} ) ) -> \"NFA\": \"\"\" Takes a regular expression", "to a DFA. That makes for a relatively simple but,", "bar and star mean what you'd expect them to mean", "State] - `start_state`: State - `accept_state`: AbstractSet[State] (where States are", "and a symbol is in the domain of the transition", "left_operand = machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine)", "_get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€' for state in", "symbol in product(state_pairs, union_alphabet): union_transition_function[(state1 + state2, symbol)] = (", "state1 in self.states - {self.accept_state, rip}: r1 = self.transition_function[(state1, rip)]", "the transition function's range is not a set; 5. the", "you are familiar with regular expressions. '•' (option-8 on a", "return machine_stack.pop() OPERATORS = ['sentinel', '|', '•', '*'] PARENTHE =", "of states. 
\"\"\" return (self.non_determinize() + other.non_determinize()).determinize() def _gnfize(self) ->", "combinations from string import printable from typing import ( AbstractSet,", "State: counter = 1 new_state = 'new_state1' while new_state in", "NfaTransitionFunction]: def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction: new_tf =", "(i.e., a set of one-character strings) as input; returns an", "reduced_tf[(state1, state2)] = new_regex return _GNFA( reduced_tf, self.body_states - {rip},", "self._transition(state_set, symbol) ) if set(state_set) & self.accept_states: determinized_accept.add(determinzed_state) determinized_start =", "algorith. For that reason, I recommend you don't `+` dfas", "transition_function self.body_states = body_states self.start_state = start_state self.accept_state = accept_state", "a regular expression and an alphabet (i.e., a set of", "accept_states=union_accept_states ) def __add__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let", "AbstractSet[State]: self._transition_function = cast( NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state, sym),", "of \"printable\" characters, which includes the standard ASCII letters and", "in regex: if char == '(': paren_count += 1 elif", "on instantiation if any of th following are true: *", "in alphabet: machine_stack.append(fit_symbol(char)) elif char == '*': machine_stack[-1] = machine_stack[-1].star()", "in dfa1.states: new_tf[(state, symbol)] = error_state return new_states, new_tf self_states,", "@staticmethod def fit( regex: Regex, alphabet: AbstractSet[Symbol] = ( set(printable)", "} state_sets = powerset(self.states) determinized_tf = {} determinized_accept = set()", "> 2: gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self)", "= dfa2.alphabet - dfa1.alphabet if extra_symbols: error_state = _get_new_state(dfa1.states) new_states", "= start_state self.accept_state = accept_state 
self.states = ( self.body_states |", ") def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours = self._get_successors(", "= regex[0] if first_char in OPERATORS: raise ValueError(f\"Regex cannot start", "_GNFA: gnfa_tf: MutableGnfaTF = {} for state1, symbol in self.transition_function.keys():", "return new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf =", "start with '{first_char}'.\") processed = '' paren_count = 0 for", "of the transition \" \"function is not in the fsa's", "is string.printable *minus* parentheses, the vertical bar, the star symbol,", "A be the language accepted by dfa. `dfa.encode()` returns a", "& other.states while overlap: other = copy(other) overlap = self.states", "= powerset(self.states) determinized_tf = {} determinized_accept = set() for (state_set,", "( self.body_states | {self.start_state} | {self.accept_state} ) def reduce(self) ->", "None: transition_range = set(self.transition_function.values()) bad_range = transition_range - self.states _error_message(", "keys of the `transition_function` implicitly define the dfa's state-set and", "union_main_scope(regex2): regex2 = f'({regex2})' return regex1 + regex2 def regex_union(regex1:", "{self.accept_state} ) def reduce(self) -> \"_GNFA\": \"\"\" Output a GNFA", "self_states, self_tf = maybe_add_state(self, other) other_states, other_tf = maybe_add_state(other, self)", "-> \"_GNFA\": \"\"\" Output a GNFA equivalent to `self` with", "state2 = self.transition_function[(state1, symbol)] if (state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1,", "self._transition_function.keys() ) self._well_defined() @property def alphabet(self) -> FrozenSet[Symbol]: return self._alphabet", "the tuples represent the nfa's states, and the second elements", "EMPTIES = ['€', 'Ø'] NOT_SYMBOLS = OPERATORS + PARENTHE +", "that is not in \" \"alphabet and not an accepted", "machine_stack.append(fit_empty(char)) elif char in alphabet: 
machine_stack.append(fit_symbol(char)) elif char == '*':", "Symbols are one-char strings.) The transition function' keys implicitly define", "This DFA operator is parasitic on the NFA operator; it", "takes as input is very simple -- much simpler than", "not in the fsa's state set.\"), message_plural=(\"States {} in the", "-> Regex: \"\"\" Let A be the language accepted by", "complexity of this method is exponential in the number of", "by some miracle, there is someone who cares about this,", "= {self.start_state} for symbol in self.alphabet: star_tf[(star_start, symbol)] = set()", "alphabets. \"\"\" new_self, new_other, union_tf = self._combine(other) union_start_state = _get_new_state(new_self.states", "} return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def _stringify(states: Iterable[State])", "regex string is liable to be much more complicated than", "\" \"operator; not cool.\" ) if char == '(': paren_count", "character '{char}' that is not in \" \"alphabet and not", "Symbol = str Regex = str FsaTransitionFunction = Mapping[ Tuple[State,", "gnfa_start = _get_new_state(self.states) gnfa_accept = _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)]", "if char == '(': paren_count += 1 if char ==", "*minus* parentheses, the vertical bar, the star symbol, and the", "union_start_state = _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')] = { new_self.start_state,", "are one-char strings). The keys of the `transition_function` implicitly define", "Let A be the language recognised by nfa1, and B", "== 'Ø' else {'q1'} return NFA( transition_function=tf, start_state='q1', accept_states=accept_states )", "you can match to the empty language with it. 
For", "\"\"\"Returns a DFA that recognizes the same same language as", "miracle, there is someone who cares about this, I will", "function is not a one-character string; * the transition function", "ValueError( f\"Regex contains character '{char}' that is not in \"", "reasons that I will explain presently. As of now, the", "= set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range - self.states, message_singular=(\"State {}", "add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols", "out how to improve on average simplicity, eventually. \"\"\" gnfa", "\"left parenthesis.\" ) processed += char if paren_count > 0:", "The parentheses, vertical bar and star mean what you'd expect", "paren_count -= 1 elif char == '|': if paren_count ==", "add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1) self_tf, other_tf = add_empty_transitions(self, other) new_self", "absence of parentheses, the order of operations is: `*`, then", "simple -- much simpler than the standard python regular expresssions.", "['€', 'Ø'] NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES def", "None: bad_accept_states = self.accept_states - self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state", "transition function dictionary should be sets (or frozensets). The empty", "class DFA(_FSA): \"\"\" A deterministic finite automaton class. Takes three", "def accept_states(self) -> AbstractSet[State]: return self._accept_states def _well_defined(self) -> None:", "for pair in product({'q1', 'q2'}, alphabet) } tf[('q1', symbol)] =", "state in self.accept_states: star_tf[(state, '')] = {self.start_state} star_accepts = self.accept_states", "strings, and Symbols are one-char strings.) The transition function' keys", "operator is parasitic on the NFA operator; it converts the", "liable to be much more complicated than necessary; maybe I'll", "`self` with one less state in it. 
\"\"\" def union_main_scope(regex:", "processed += char if paren_count > 0: raise ValueError( \"Left", "the state set of nfa1 plus the cardinality of the", "character.\" ) if char in OPERATORS and processed[-1] in {'|',", "symbol in self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)] if (state1, state2)", "paren_count += 1 if char == ')': paren_count -= 1", "alphabet contains any of the verboten characters -- i.e.,`(` ,", "_check_input ) State = str Symbol = str Regex =", "self._transition(current_states, symbol) return not current_states & self.accept_states == set() def", "For that reason, I recommend you don't `+` dfas with", "that recognizes A union B. The states of dfa1 |", "be symbols in the alphabet of the NFA. (My apologies", "an nfa that recognizes A concat B -- i.e., the", "AbstractSet[State]: return self._accept_states def _well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\")", "recognizes A union B. The states of dfa1 | dfa2", "Convenience method that takes a DFA instance and returns an", "regex: Regex, alphabet: AbstractSet[Symbol] = ( set(printable) - {'(', ')',", "typing import ( AbstractSet, Container, FrozenSet, Iterable, List, Mapping, MutableMapping,", "a go.) 'Ø' (option-shift-o) represents the empty set; you can", "Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function new_states = dfa1.states extra_symbols =", ") ) -> \"NFA\": \"\"\" Takes a regular expression and", "maybe_add_state(self, other) other_states, other_tf = maybe_add_state(other, self) state_pairs = product(self_states,", "returns an nfa that recognizes A concat B -- i.e.,", "string: str) -> bool: \"\"\" Determines whether nfa accepts input", "mac keyboard) means concatenation. You can leave concatentation implicit, as", "be the language recognised by dfa1, and B be the", "would expect). 
The class will raise a ValueError exception on", "you don't `+` dfas with large numbers of states. \"\"\"", "generates A. That regex string is liable to be much", "transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"DFA\") -> \"DFA\":", "\"alphabet and not an accepted regex character.\" ) if char", "processed[-1] not in {'(', '|'} else '' ) if char", "`dfa.encode()` returns a regex string that generates A. That regex", "'{char}' that is not in \" \"alphabet and not an", "state_set: state_set = state_set | epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours,", "public classes \"\"\" import collections.abc from itertools import product, chain,", "2. the set of accept states is not a subset", "alphabet=self.alphabet) current_state = self.start_state for symbol in string: current_state =", "of the transition function is the power-set of the nfa's", "= gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) -> NFA: \"\"\"", "__add__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let A be the", "ValueError( \"Left parenthesis occurs in regex without matching right \"", "characters, which includes the standard ASCII letters and digits, and", "regex and build the NFA. The method will raise a", "conditions things triggered the exception, and which states/symbols are the", "of nfa2 plus 1. 
There is no problem with the", "transition \" \"function are not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] =", "transition \" \"function is not a set.\"), message_plural=(\"Values {} in", "other_states)) | set(product(self_states, other.accept_states)) ) } return DFA( transition_function=union_transition_function, start_state=union_start_state,", "in self.accept_states: star_tf[(state, '')] = {self.start_state} star_accepts = self.accept_states |", "symbols in the alphabet except for '(', '')', '|', '*',", "union B. The states of dfa1 | dfa2 are ordered", "product({'q1', 'q2'}, alphabet) } tf[('q1', symbol)] = {'q2'} return NFA(", "string in place of an alphabet symbol in the transition", "states inferred from the transition function; * the set of", "start_state=start_state ) self._accept_states = accept_states self._states, self._alphabet = _extract_states_alphabet( self._transition_function.keys()", "PARENTHE = ['(', ')'] EMPTIES = ['€', 'Ø'] NOT_SYMBOLS =", "of the set of states inferred from the transition function;", "'Ø' if regex1 == '€': return regex2 if regex2 ==", "in normal python regex syntax either, though it can be", "} _error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot contain character {}.\",", "'€' (option-shift-2) is used to match the empty string (because", "{gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€' for state in self.accept_states: gnfa_tf[(state,", "return _GNFA( reduced_tf, self.body_states - {rip}, self.start_state, self.accept_state ) NfaTransitionFunction", "the alphabet except for '(', '')', '|', '*', '•', '€'", "| {error_state} for symbol in union_alphabet: new_tf[(error_state, symbol)] = error_state", "power set of states inferred from the transition function; 6.", "syntax either, though it can be done; give it a", "= OPERATORS + PARENTHE + EMPTIES def _pre_process(regex: Regex, alphabet:", "with large numbers of states. 
\"\"\" return (self.non_determinize() + other.non_determinize()).determinize()", "not a subset of the power set of states inferred", "\"\"\" Let A be the language recognised by nfa1, and", "and that alphabet. The alphabet parameter is optional; it's default", "left_operand, right_operand ) machine_stack.append(machine) def compare(operator: Regex) -> int: return", "regex_union( regex_concat(regex_concat(r1, regex_star(r2)), r3), r4 ) reduced_tf[(state1, state2)] = new_regex", "if processed[-1] not in {'(', '|'} else '' ) if", "determinize big NFAs. \"\"\" # powerset code an itertools recipe,", "\"parenthesis.\" ) return processed DfaTransitionFunction = Mapping[Tuple[State, Symbol], State] class", "def copy(nfa: NFA) -> NFA: copy_tf = {} for state,", "and 'Ø' cannot be symbols in the alphabet of the", "reason, I recommend you don't `+` dfas with large numbers", "( set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states)) ) } return DFA(", "len(processed) > 0: processed += ( '•' if processed[-1] not", "`start_state`: State - `accept_state`: AbstractSet[State] (where States are strings and", "an alphabet (i.e., a set of one-character strings) as input;", "symbol) return not current_states & self.accept_states == set() def determinize(self)", "exception if any of the following conditions hold: 1. the", "-> State: counter = 1 new_state = 'new_state1' while new_state", "tf: NfaTransitionFunction = { pair: set() for pair in product({'q1'},", "in range(len(s)+1) ) } state_sets = powerset(self.states) determinized_tf = {}", "alphabet (and hence the checks below will work as you", "union_accept_states = new_self.accept_states | new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states", "x for x in self.transition_function.values() if not isinstance(x, collections.abc.Set) }", "the transition function; 6. 
the transition function is missing cases", "if extra_symbols: for pair in product(nfa1.states, extra_symbols): new_tf[pair] = set()", "| {gnfa_start}, self.states | {gnfa_accept} ): if (state1, state2) not", "a mypy bug. return empty.union( *[frozenset(get_successor(state, symbol)) for state in", "gnfa_tf[(state1, state2)] = 'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def", "alphabet; the first elements of the tuples represent the nfa's", "nfa. `nfa.self()` returns an nfa that recognizes A* -- i.e.,", "method will raise a ValueError exception if any of the", "r1 = self.transition_function[(state1, rip)] for state2 in self.states - {self.start_state,", "_get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start, '')] = {self.start_state} for symbol", "if regex2 == \"Ø\": return regex1 return f\"{regex1}|{regex2}\" rip =", "- state_set: state_set = state_set | epsilon_neighbours epsilon_neighbours = self._get_successors(", "!= '(': binary_operate() operator_stack.pop() while len(operator_stack) > 1: binary_operate() return", "symbol in union_alphabet: new_tf[(error_state, symbol)] = error_state for symbol in", "alphabet) } accept_states = set() if empty == 'Ø' else", "= set() for state in self.accept_states: star_tf[(state, '')] = {self.start_state}", "+ other.non_determinize()).determinize() def _gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF = {}", "self._get_successors( state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set) def _transition(self, state_set: AbstractSet[State],", "b is an element of B. 
Note that this `+`", "function is missing cases -- i.e., it is not the", "determinize(self) -> \"DFA\": \"\"\"Returns a DFA that recognizes the same", "self.body_states.pop() r2 = self.transition_function[(rip, rip)] reduced_tf = {} for state1", "Mapping[Tuple[State, Symbol], State] class DFA(_FSA): \"\"\" A deterministic finite automaton", "the transition function is missing a case -- i.e., it", "Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`: State - `accept_states`: AbstractSet[State] (Where", "in the transition function. Note that the empty string will", "DFAs having different alphabets. \"\"\" union_alphabet = self.alphabet | other.alphabet", "matching parentheses. \"\"\" operator_to_operation = { '|': NFA.__or__, '•': NFA.__add__", "frozenset(state_set) def _transition(self, state_set: AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol))", "collections.abc from itertools import product, chain, combinations from string import", "not members of the fsa's \" \"state set.\") ) def", "-> None: transition_range = set(self.transition_function.values()) bad_range = transition_range - self.states", "the checks below will work as you would expect). The", "operator; it converts the input DFAs into NFAs, uses the", "accept_states=new_other.accept_states ) def _combine(self, other: \"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]:", "returns an NFA instance. \"\"\" nd_transition_function = { key: {value}", "strings in B. 
This DFA operator is parasitic on the", "in EMPTIES: return '€' if len(regex) == 1: return regex", "Mapping, MutableMapping, Optional, Set, Tuple, Union, cast ) from .base", "\"\"\" nd_transition_function = { key: {value} for key, value in", "if regex1 == '€': return regex2 if regex2 == '€':", "to match, for instance, {'', '0'} with the current syntax.", "char in OPERATORS: if operator_stack[-1] in PARENTHE or compare(char) >", "-> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state: State): return state +", "== '(': operator_stack.append(char) else: while operator_stack[-1] != '(': binary_operate() operator_stack.pop()", "concatenations of strings in A with strings in B. This", "'(', ')', '|', '*', '•', '€' and 'Ø' cannot be", "gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) -> NFA: \"\"\" Convenience", "the successor set for a given state-symbol pair is the", "| dfa2` returns a dfa that recognizes A union B.", "+= 1 elif char == ')': paren_count -= 1 elif", "verboten characters -- i.e.,`(` , `)`, `|`, `*`, `•`, `€`", "DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function new_states =", "operator_stack.append(char) elif char == '(': operator_stack.append(char) else: while operator_stack[-1] !=", "new_states = dfa1.states | {error_state} for symbol in union_alphabet: new_tf[(error_state,", "- dfa1.alphabet if extra_symbols: error_state = _get_new_state(dfa1.states) new_states = dfa1.states", "them to mean if you are familiar with regular expressions.", "the language recognized by nfa2. `nfa1 | nfa2` returns an", "combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf def _good_range(self) -> None:", "problem. \"\"\" def __or__(self, other: \"DFA\") -> \"DFA\": \"\"\" Let", "transition function is not a subset of the power set", "6. 
the transition function is missing cases -- i.e., it", "`|`. This method uses a version of Dijkstra's shunting yard", "of now, the syntax of the regular expressions that this", "item in ( set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states)) ) }", "can match to the empty language with it. For reaons", "new_states = dfa1.states extra_symbols = dfa2.alphabet - dfa1.alphabet if extra_symbols:", "in EMPTIES: machine_stack.append(fit_empty(char)) elif char in alphabet: machine_stack.append(fit_symbol(char)) elif char", "NFA public classes \"\"\" import collections.abc from itertools import product,", "true: 1. the start state is not a member of", "\"\"\" Determines whether nfa accepts input string. Will raise a", "that recognizes A* -- i.e., the set of all strings", "concat b, where a is an element of A and", "self.states - {self.start_state, rip}: r3 = self.transition_function[(rip, state2)] r4 =", "\"\"\" def union_main_scope(regex: Regex) -> bool: paren_count = 0 for", "')', '|', '*'} ) ) -> \"NFA\": \"\"\" Takes a", "'|', '*', '•', '€' and 'Ø'. The parentheses, vertical bar", "new_tf[(state, symbol)] = error_state return new_states, new_tf self_states, self_tf =", "algorithm to parse the regex and build the NFA. The", "'*' return f\"({regex})*\" def regex_concat(regex1: Regex, regex2: Regex) -> Regex:", "def alphabet(self) -> FrozenSet[Symbol]: return self._alphabet @property def accept_states(self) ->", "union_main_scope(regex: Regex) -> bool: paren_count = 0 for char in", "define the nfa's state-set and alphabet; the first elements of", "string; 4. 
a member of the transition function's range is", "the cardinality of the state set of nfa1 plus the", "other.non_determinize()).determinize() def _gnfize(self) -> _GNFA: gnfa_tf: MutableGnfaTF = {} for", "for char in regex: if char in alphabet or char", "_check_input(string=string, alphabet=self.alphabet) current_state = self.start_state for symbol in string: current_state", "\" \"function is not in the fsa's state set.\"), message_plural=(\"States", ") return processed DfaTransitionFunction = Mapping[Tuple[State, Symbol], State] class DFA(_FSA):", "from itertools import product, chain, combinations from string import printable", "| new_other.accept_states return NFA( transition_function=union_tf, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self,", "set of all concatenations of strings in A with strings", "gnfa_start, gnfa_accept) def _good_range(self) -> None: transition_range = set(self.transition_function.values()) bad_range", "in OPERATORS: if operator_stack[-1] in PARENTHE or compare(char) > 0:", "( set(printable) - {'(', ')', '|', '*'} ) ) ->", "for char in regex: if char == '(': paren_count +=", "def fit_symbol(symbol: Symbol) -> NFA: tf: MutableNfaTF = { pair:", "You can leave concatentation implicit, as is usual; no need", "regular expression and that alphabet. 
The alphabet parameter is optional;", "to the above, the characters '(', ')', '|', '*', '•',", "{} for state1, symbol in self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)]", "{value} for key, value in self.transition_function.items() } return NFA( transition_function=nd_transition_function,", "i.e., the set of all strings formed by concatenating any", "symbol in string: current_states = self._transition(current_states, symbol) return not current_states", "f'({regex1})' if union_main_scope(regex2): regex2 = f'({regex2})' return regex1 + regex2", "the exception, and which states/symbols are the source of the", "= _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€' for state", "a member of the transition function's range is not a", "is. You can define epsilon-moves by using the empty string", "subset of the set of states inferred from the transition", "less state in it. \"\"\" def union_main_scope(regex: Regex) -> bool:", "set of DFA states has the cardinality of the power-set", "1 if paren_count < 0: raise ValueError( \"Right parenthesis occurs", "_get_new_state(dfa1.states) new_states = dfa1.states | {error_state} for symbol in union_alphabet:", "not in the fsa's state set.\") ) def accepts(self, string:", "symbol)] = ( self_tf[(state1, symbol)] + other_tf[(state2, symbol)] ) union_start_state", "the case that every pair of a state and a", "the transition function; * the set of accept states is", ") def __add__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A", "{self.start_state} | {self.accept_state} ) def reduce(self) -> \"_GNFA\": \"\"\" Output", "reaons related to the above, the characters '(', ')', '|',", "not in gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø' return _GNFA(gnfa_tf, set(self.states),", "start_state='q1', accept_states=accept_states ) def fit_symbol(symbol: Symbol) -> NFA: tf: MutableNfaTF", "expression and that alphabet. 
The alphabet parameter is optional; it's", "accepts input string. Will raise a ValueError exception is the", "dfa2. There is no problem with the input DFAs having", "union_alphabet): union_transition_function[(state1 + state2, symbol)] = ( self_tf[(state1, symbol)] +", "strings of the form a concat b, where a is", "NFA instance. WARNING: The set of DFA states has the", "for a given state-symbol pair is the empty set, if", "that recognises the set of all concatenations of strings in", "if paren_count < 0: raise ValueError( \"Right parenthesis occurs in", "( _Base, _extract_states_alphabet, _error_message, _good_alphabet, _check_input ) State = str", "determinized_start = _stringify( self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept", "_GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def _good_range(self) -> None: transition_range =", "parenthesis occurs in regex withour matching \" \"left parenthesis.\" )", "from the transition function; * a member of the alphabet", "-- i.e.,`(` , `)`, `|`, `*`, `•`, `€` and `Ø`,", "set(self.transition_function.values()) bad_range = transition_range - self.states _error_message( bad_set=bad_range, message_singular=(\"State {}", "of states from dfa1 and dfa2. There is no problem", "characters {}.\" ) def fit_empty(empty: Regex) -> NFA: tf: NfaTransitionFunction", "powerset(self.states) determinized_tf = {} determinized_accept = set() for (state_set, symbol)", "return NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self, other: \"NFA\")", "self.accept_states - self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {} is not", "1: return regex + '*' return f\"({regex})*\" def regex_concat(regex1: Regex,", "set.\"), message_plural=(\"States {} in the range of the transition \"", "should be sets (or frozensets). 
The empty set is a", "of this method is exponential in the number of states", "string\")` returns `True` if my_dfa accepts \"some string\", and `False`", "nfa2` returns an nfa that recognizes A union B. The", "union_main_scope(regex1): regex1 = f'({regex1})' if union_main_scope(regex2): regex2 = f'({regex2})' return", "string contains symbols that aren't in the nfa's alphabet. \"\"\"", "of the NFA. Don't determinize big NFAs. \"\"\" # powerset", ") def _good_range(self): raise NotImplementedError GnfaTransitionFunction = Mapping[Tuple[State, State], Regex]", "the set of all concatenations of strings in A with", "in alphabet | set(NOT_SYMBOLS): raise ValueError( f\"Regex contains character '{char}'", "dfa1.alphabet if extra_symbols: error_state = _get_new_state(dfa1.states) new_states = dfa1.states |", "nfa that recognizes A* -- i.e., the set of all", "return frozenset(state_set) def _transition(self, state_set: AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set,", "== '|': if paren_count == 0: return True return False", "states inferred from the transition function; 6. the transition function", "a symbol is in the domain of the transition function.", "return 'Ø' if regex1 == '€': return regex2 if regex2", "the characters '(', ')', '|', '*', '•', '€' and 'Ø'", "the syntax of the regular expressions that this method takes", "regex withour matching \" \"left parenthesis.\" ) processed += char", "0 ): binary_operate() operator_stack.append(char) elif char == '(': operator_stack.append(char) else:", "MutableMapping[Tuple[State, State], Regex] class _GNFA: def __init__( self, transition_function: GnfaTransitionFunction,", "Iterable[State]) -> str: if not isinstance(states, collections.abc.Sequence): states = list(states)", "in alphabet or char == '(': if len(processed) > 0:", "apologies to speakers of Scandinavian languages for the last one;", "DFA instance and returns an NFA instance. 
\"\"\" nd_transition_function =", "any of the following conditions hold: 1. the alphabet contains", "the string contains symbols that aren't in the nfa's alphabet.", "input regex does not have properly matching parentheses. \"\"\" operator_to_operation", "by nfa1, and B be the language recognized by nfa2.", "are ordered pairs of states from dfa1 and dfa2. There", "the same same language as the NFA instance. WARNING: The", "regex string contains a character not in the alphabet, and", "= {} for state1, symbol in self.transition_function.keys(): state2 = self.transition_function[(state1,", "the range of the transition \" \"function is not a", "extra_symbols: for pair in product(nfa1.states, extra_symbols): new_tf[pair] = set() return", "{self.start_state} for symbol in self.alphabet: star_tf[(star_start, symbol)] = set() for", "method is exponential in the number of states of the", "operations is: `*`, then `•`, then `|`. This method uses", "error_state = _get_new_state(dfa1.states) new_states = dfa1.states | {error_state} for symbol", "'€' and 'Ø' cannot be symbols in the alphabet of", "implicitly define the nfa's state-set and alphabet; the first elements", "having different alphabets. \"\"\" union_alphabet = self.alphabet | other.alphabet def", "source of the problem. \"\"\" def __or__(self, other: \"DFA\") ->", "in the alphabet except for '(', '')', '|', '*', '•',", "which of these above conditions things triggered the exception, and", "AbstractSet, Container, FrozenSet, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple,", "strings.) The transition function' keys implicitly define the nfa's state-set", "the empty set, if it is. 
You can define epsilon-moves", "three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`:", "for r in range(len(s)+1) ) } state_sets = powerset(self.states) determinized_tf", "reduced_tf, self.body_states - {rip}, self.start_state, self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State,", "alphabet(self) -> FrozenSet[Symbol]: return self._alphabet @property def accept_states(self) -> AbstractSet[State]:", "arguments: - `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] - `start_state`: State -", "{ _stringify(item) for item in ( set(product(self.accept_states, other_states)) | set(product(self_states,", "\"DFA\": \"\"\"Returns a DFA that recognizes the same same language", "\"\"\" Let A be the language recognised by nfa. `nfa.self()`", "| {self.accept_state} ) def reduce(self) -> \"_GNFA\": \"\"\" Output a", "State, accept_state: State ): self.transition_function = transition_function self.body_states = body_states", "GnfaTransitionFunction, body_states: Set[State], start_state: State, accept_state: State ): self.transition_function =", "one-character strings) as input; returns an NFA that recognises the", "states of the NFA. Don't determinize big NFAs. \"\"\" #", "need to write '•'' explicitly if you don't want to.", "the transition \" \"function is not a set.\"), message_plural=(\"Values {}", "self._gnfize() while len(gnfa.states) > 2: gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state,", "language recognised by dfa2. 
`dfa1 + dfa2` returns a DFA", "def _transition(self, state_set: AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def", "of the power set of states inferred from the transition", "elif char in OPERATORS: if operator_stack[-1] in PARENTHE or compare(char)", "for state1, symbol in self.transition_function.keys(): state2 = self.transition_function[(state1, symbol)] if", "will specify which of these above conditions things triggered the", "The set of DFA states has the cardinality of the", "other_tf[(state2, symbol)] ) union_start_state = self.start_state + other.start_state union_accept_states =", "- `accept_states`: AbstractSet[State] (Where States are strings, and Symbols are", "the cardinality of the state-set of nfa2 plus 1. There", "self.body_states | {self.start_state} | {self.accept_state} ) def reduce(self) -> \"_GNFA\":", "-- i.e., it is not the case that every pair", "\"NFA\", MutableNfaTF]: def prime(state: State): return state + '`' def", "accept_states = set() if empty == 'Ø' else {'q1'} return", "super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) -> None: bad_accept_states", "it kind of looks like an epsilon); there's no other", "regular expressions. '•' (option-8 on a mac keyboard) means concatenation.", "the return a set of frozensets). def powerset(iterable: Iterable) ->", "_stringify( self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def", "): binary_operate() operator_stack.append(char) elif char == '(': operator_stack.append(char) else: while", "letter is so very close to the empty-set symbol. 
If,", "is not a member of the fsa's \" \"state set.\"),", "in product(state_pairs, union_alphabet): union_transition_function[(state1 + state2, symbol)] = ( self_tf[(state1,", "commutative. \"\"\" new_self, new_other, concat_tf = self._combine(other) for state in", "= Mapping[Tuple[State, Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]] class", "< 0: raise ValueError( \"Right parenthesis occurs in regex withour", "+ dfa2` returns a DFA that recognises the set of", "in state_set] ) def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours", "self.states | {gnfa_start}, self.states | {gnfa_accept} ): if (state1, state2)", "problem with the input NFAs having different alphabets. \"\"\" new_self,", "by that regular expression and that alphabet. The alphabet parameter", "Let A be the language recognised by dfa1, and B", "\"\"\" star_start = _get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start, '')] =", "return { frozenset(item) for item in chain.from_iterable( combinations(s, r) for", "an epsilon); there's no other way to match, for instance,", "rip = self.body_states.pop() r2 = self.transition_function[(rip, rip)] reduced_tf = {}", "list(iterable) return { frozenset(item) for item in chain.from_iterable( combinations(s, r)", "_pre_process(regex, alphabet) for char in regex: if char in EMPTIES:", "new_other = NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf = {}", "space. 
Actually, that's not quite right -- the default value", "the vertical bar, the star symbol, and the tilde, for", "} for symbol in new_self.alphabet | new_other.alphabet: union_tf[(union_start_state, symbol)] =", "regex2 if regex2 == '€': return regex1 if union_main_scope(regex1): regex1", "Regex] MutableGnfaTF = MutableMapping[Tuple[State, State], Regex] class _GNFA: def __init__(", "for state1 in self.states - {self.accept_state, rip}: r1 = self.transition_function[(state1,", "not totally obvious how to match the empty string in", "DFA and NFA public classes \"\"\" import collections.abc from itertools", "pair in product({'q1'}, alphabet) } accept_states = set() if empty", "the transition \" \"function are not in the fsa's state", "is not a set; 5. the range of the transition", "in OPERATORS and processed[-1] in {'|', '•'}: raise ValueError( \"Regex", "that recognises the language defined by that regular expression and", "\"\"\" new_self, new_other, union_tf = self._combine(other) union_start_state = _get_new_state(new_self.states |", "*self.transition_function.values() ) _error_message( bad_set=transition_range - self.states, message_singular=(\"State {} in the", "common punctuation and white space. Actually, that's not quite right", "B. The states of dfa1 | dfa2 are ordered pairs", "Note that this `+` operation is not commutative. \"\"\" new_self,", "automaton class. Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol],", "and the second elements are the symbols in the alphabet.", "_stringify(item) for item in ( set(product(self.accept_states, other_states)) | set(product(self_states, other.accept_states))", "{gnfa_start}, self.states | {gnfa_accept} ): if (state1, state2) not in", "tf: MutableNfaTF = { pair: set() for pair in product({'q1',", "fsa's state set.\"), message_plural=(\"States {} in the range of the", "characters, 3. 
the input regex contain a binary operator followed", "'•' (option-8 on a mac keyboard) means concatenation. You can", "\" \"function is not a set.\"), message_plural=(\"Values {} in the", "in string: current_states = self._transition(current_states, symbol) return not current_states &", ") if char not in alphabet | set(NOT_SYMBOLS): raise ValueError(", "not a member of the fsa's \" \"state set.\"), message_plural=(\"Accept", "parentheses, the vertical bar, the star symbol, and the tilde,", "is parasitic on the NFA operator; it converts the input", "of the above veboten characters, 3. the input regex contain", "= _pre_process(regex, alphabet) for char in regex: if char in", "transition_function=tf, start_state='q1', accept_states=accept_states ) def fit_symbol(symbol: Symbol) -> NFA: tf:", "--- i.e., the values of the transition function dictionary should", "a set; 5. the range of the transition function is", "combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf def", "start_state self.accept_state = accept_state self.states = ( self.body_states | {self.start_state}", "function. The exception message will specify which of these six", "accept_state self.states = ( self.body_states | {self.start_state} | {self.accept_state} )", "start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"NFA\") -> \"NFA\": \"\"\"", "nfa accepts input string. Will raise a ValueError exception is", "MutableNfaTF = { pair: set() for pair in product({'q1', 'q2'},", "elif char in alphabet: machine_stack.append(fit_symbol(char)) elif char == '*': machine_stack[-1]", "parse the regex and build the NFA. 
The method will", "set(printable) - {'(', ')', '|', '*'} ) ) -> \"NFA\":", "missing a case -- i.e., it is not the case", "= self.body_states.pop() r2 = self.transition_function[(rip, rip)] reduced_tf = {} for", "not one of the above veboten characters, 3. the input", "'•' if processed[-1] not in {'(', '|'} else '' )", "+ EMPTIES def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex: first_char", "A and b is an element of B. Note that", "-> NFA: copy_tf = {} for state, symbol in nfa.transition_function.keys():", "function is the power-set of the nfa's state set ---", "includes the standard ASCII letters and digits, and most common", "contains character '{char}' that is not in \" \"alphabet and", "state_set: AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string:", "DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state = self.start_state for symbol", "range of the transition \" \"function is not in the", "4. the input regex does not have properly matching parentheses.", "an NFA instance. \"\"\" nd_transition_function = { key: {value} for", "for char in regex: if char in EMPTIES: machine_stack.append(fit_empty(char)) elif", "isinstance(states, collections.abc.Sequence): states = list(states) states.sort() return \"\".join(states) def _get_new_state(state_set:", "is an element of A and b is an element", "of operations is: `*`, then `•`, then `|`. This method", "no other way to match, for instance, {'', '0'} with", "states.sort() return \"\".join(states) def _get_new_state(state_set: Container) -> State: counter =", "cast ) from .base import ( _Base, _extract_states_alphabet, _error_message, _good_alphabet,", "for a relatively simple but, sadly, computationally expensive algorith. 
For", "that's not quite right -- the default value is string.printable", "'(': paren_count += 1 elif char == ')': paren_count -=", "powerset code an itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes # (minor", "order of operations is: `*`, then `•`, then `|`. This", "with strings in B. This DFA operator is parasitic on", "if any of the following are true: 1. the start", "close to the empty-set symbol. If, by some miracle, there", "Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], State] -", "'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def _good_range(self) -> None:", "+ regex2 def regex_union(regex1: Regex, regex2: Regex) -> Regex: if", "def _stringify(states: Iterable[State]) -> str: if not isinstance(states, collections.abc.Sequence): states", "All characters are intepreted as literals for symbols in the", "| set(product(self_states, other.accept_states)) ) } return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states", "range of the transition \" \"function are not sets.\") )", "- {rip}, self.start_state, self.accept_state ) NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]]", "value; in fact, you are required to specify that the", "'•', '€' and 'Ø' cannot be symbols in the alphabet", "the second elements are the symbols in the alphabet. The", "-> bool: \"\"\" Determines whether nfa accepts input string. Will", "dfa1.states: new_tf[(state, symbol)] = error_state return new_states, new_tf self_states, self_tf", "the range of the transition \" \"function are not sets.\")", "self._combine(other) union_start_state = _get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')] = {", "about this, I will change the symbol for empty-set.) 
In", "paren_count += 1 elif char == ')': paren_count -= 1", "\" \"function are not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] = set.union(", "= machine_stack.pop() left_operand = machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand", "if it is. You can define epsilon-moves by using the", "first_char = regex[0] if first_char in OPERATORS: raise ValueError(f\"Regex cannot", "AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA): \"\"\" A", "dfa2: DFA ) -> Tuple[FrozenSet[State], DfaTransitionFunction]: new_tf = dfa1.transition_function new_states", "and returns an NFA instance. \"\"\" nd_transition_function = { key:", "Regex) -> Regex: if regex1 == \"Ø\": return regex2 if", "in the nfa's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_states = self._add_epsilons({self.start_state})", "if regex2 == '€': return regex1 if union_main_scope(regex1): regex1 =", "determinized_accept.add(determinzed_state) determinized_start = _stringify( self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf, start_state=determinized_start,", "and alphabet. The class will raise a ValueError exception on", "r2 = self.transition_function[(rip, rip)] reduced_tf = {} for state1 in", "in gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|' + symbol else: gnfa_tf[(state1,", "the regex and build the NFA. 
The method will raise", "DFA operator is parasitic on the NFA operator; it converts", "function; * the range of the transition function is not", "in the fsa's state set.\") ) def accepts(self, string: str)", "\" \"parenthesis.\" ) return processed DfaTransitionFunction = Mapping[Tuple[State, Symbol], State]", "Symbol], AbstractSet[State]] MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]] class NFA(_FSA): \"\"\"", "accept_states=determinized_accept ) def star(self) -> \"NFA\": \"\"\" Let A be", "return False def regex_star(regex: Regex) -> Regex: if regex in", "dfa1, B be the language recognised by dfa2. `dfa1 +", "-> int: return ( OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex =", "in product({'q1'}, alphabet) } accept_states = set() if empty ==", "if regex1 == \"Ø\": return regex2 if regex2 == \"Ø\":", "current_state in self.accept_states def encode(self) -> Regex: \"\"\" Let A", "and Symbols are one-char strings). The keys of the `transition_function`", "a subset of the set of states inferred from the", ") def fit_symbol(symbol: Symbol) -> NFA: tf: MutableNfaTF = {", "if char == '(': paren_count += 1 elif char ==", "how to match the empty string in normal python regex", "\" \"state set.\") ) def _good_range(self): raise NotImplementedError GnfaTransitionFunction =", "{prime(x) for x in nfa.accept_states} return NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept", "NFA( transition_function=copy_tf, start_state=copy_start, accept_states=copy_accept ) overlap = self.states & other.states", "domain of the transition function is the power-set of the", "1. 
There is no problem with the input NFAs having", "elif char == '*': machine_stack[-1] = machine_stack[-1].star() elif char in", "from the transition function is not a one-character string; 4.", "if (state1, state2) not in gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø'", "Let A be the language recognised by dfa1, B be", "from string import printable from typing import ( AbstractSet, Container,", "pair in product(nfa1.states, extra_symbols): new_tf[pair] = set() return new_tf return", "subset of the power set of states inferred from the", "len(operator_stack) > 1: binary_operate() return machine_stack.pop() OPERATORS = ['sentinel', '|',", "of states inferred from the transition function; * the range", "Set[State], start_state: State, accept_state: State ): self.transition_function = transition_function self.body_states", "transition function is not a one-character string; 4. a member", "by using the empty string in place of an alphabet", "from the transition function; 2. the set of accept states", "equivalent to `self` with one less state in it. \"\"\"", "\" \"state set.\"), message_plural=(\"Accept states {} are not members of", "else: while ( operator_stack[-1] not in PARENTHE and compare(char) <=", "2: gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def non_determinize(self) ->", "1. the alphabet contains any of the verboten characters --", "takes a DFA instance and returns an NFA instance. \"\"\"", "the standard python regular expresssions. All characters are intepreted as", "a member of the set of states inferred from the", "in the range of the transition \" \"function is not", "\"_GNFA\": \"\"\" Output a GNFA equivalent to `self` with one", "is the string contains symbols that aren't in the DFA's", "star_tf[(star_start, symbol)] = set() for state in self.accept_states: star_tf[(state, '')]", "be the language accepted by dfa. 
`dfa.encode()` returns a regex", "NFA: \"\"\" Convenience method that takes a DFA instance and", "b, where a is an element of A and b", "language defined by that regular expression and that alphabet. The", "| nfa2 is the cardinality of the state set of", "symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string: str) ->", "the DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state = self.start_state for", "elif char == '|': if paren_count == 0: return True", "if char not in alphabet | set(NOT_SYMBOLS): raise ValueError( f\"Regex", "\"\"\" A nondeterministic finite automaton class. Takes three keyword arguments:", "product( self.states | {gnfa_start}, self.states | {gnfa_accept} ): if (state1,", "{ '|': NFA.__or__, '•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) & alphabet,", "element of A and b is an element of B.", "in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = { prime(x) for x in", "bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet cannot contain character {}.\", message_plural=\"Alphabet cannot", "returns an nfa that recognizes A union B. The cardinality", "Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def prime(state: State): return state + '`'", "by an operator, or 4. the input regex does not", "self_tf = maybe_add_state(self, other) other_states, other_tf = maybe_add_state(other, self) state_pairs", "reduce(self) -> \"_GNFA\": \"\"\" Output a GNFA equivalent to `self`", "the language recognised by dfa1, and B be the language", "for x in nfa.transition_function[(state, symbol)] } copy_start = prime(nfa.start_state) copy_accept", "frozensets). The empty set is a valid value; in fact,", "of all concatenations of strings in A with strings in", "triggered the exception, and which states/symbols are the source of", "a DFA. That makes for a relatively simple but, sadly,", "tilde, for reasons that I will explain presently. 
As of", "'0'} with the current syntax. (Quick challenge: it's not totally", "expensive algorith. For that reason, I recommend you don't `+`", "alphabet | set(NOT_SYMBOLS): raise ValueError( f\"Regex contains character '{char}' that", "symbol=symbol)) def accepts(self, string: str) -> bool: \"\"\" Determines whether", "Regex: if regex1 == \"Ø\": return regex2 if regex2 ==", "DFA states has the cardinality of the power-set of the", "arguments: - `transition_function`: Mapping[Tuple[State, Symbol], State] - `start_state`: State -", "bool: paren_count = 0 for char in regex: if char", "which states/symbols are the source of the problem. \"\"\" def", "alphabet inferred from the transition function is not a one-character", "finite automaton class. Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State,", "be the language recognised by nfa. `nfa.self()` returns an nfa", "return NFA( transition_function=star_tf, start_state=star_start, accept_states=star_accepts ) @staticmethod def fit( regex:", "binary_operate() return machine_stack.pop() OPERATORS = ['sentinel', '|', '•', '*'] PARENTHE", "dfa. `dfa.encode()` returns a regex string that generates A. That", "} tf[('q1', symbol)] = {'q2'} return NFA( transition_function=tf, start_state='q1', accept_states={'q2'}", "= self._get_successors( state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set) def _transition(self, state_set:", "set.\"), message_plural=(\"Accept states {} are not members of the fsa's", "Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string: str) -> bool:", "right -- the default value is string.printable *minus* parentheses, the", "members of A. 
\"\"\" star_start = _get_new_state(self.states) star_tf = self.transition_function", "error_state for symbol in extra_symbols: for state in dfa1.states: new_tf[(state,", "def _well_defined(self) -> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def", "set of NFA states. For related reasons, the time complexity", "A. \"\"\" star_start = _get_new_state(self.states) star_tf = self.transition_function star_tf[(star_start, '')]", "char == '(': operator_stack.append(char) else: while operator_stack[-1] != '(': binary_operate()", "regex2 if regex2 == \"Ø\": return regex1 return f\"{regex1}|{regex2}\" rip", "character not in the alphabet, and not one of the", "len(gnfa.states) > 2: gnfa = gnfa.reduce() return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)] def", "copy_start = prime(nfa.start_state) copy_accept = {prime(x) for x in nfa.accept_states}", "_get_successors( self, *, state_set: AbstractSet[State], symbol: Symbol ) -> FrozenSet[State]:", "Determines whether nfa accepts input string. Will raise a ValueError", "the set of states inferred from the transition function; 3.", "tuples represent the nfa's states, and the second elements are", "current syntax. (Quick challenge: it's not totally obvious how to", "- self.states _error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {} is not a", "accept_states={'q2'} ) machine_stack: List[NFA] = [] operator_stack = ['sentinel'] def", "| dfa2 are ordered pairs of states from dfa1 and", "The transition function' keys implicitly define the nfa's state-set and", "the time complexity of this method is exponential in the", "expression and an alphabet (i.e., a set of one-character strings)", "am very against English chauvinism, but your letter is so", "alphabet or char == '(': if len(processed) > 0: processed", "be sets (or frozensets). 
The empty set is a valid", "usual; no need to write '•'' explicitly if you don't", "of the transition \" \"function is not a set.\"), message_plural=(\"Values", "parasitic on the NFA operator; it converts the input DFAs", "element of B. Note that this `+` operation is not", "method takes as input is very simple -- much simpler", "non_determinize(self) -> NFA: \"\"\" Convenience method that takes a DFA", "- `accept_state`: AbstractSet[State] (where States are strings and Symbols are", "B be the language recognized by nfa2. `nfa1 + nfa2`", "'')].add(new_other.start_state) else: concat_tf[(state, '')] = {new_other.start_state} return NFA( transition_function=concat_tf, start_state=new_self.start_state,", "are true: 1. the start state is not a member", "= ( self_tf[(state1, symbol)] + other_tf[(state2, symbol)] ) union_start_state =", "def encode(self) -> Regex: \"\"\" Let A be the language", "for state1, state2 in product( self.states | {gnfa_start}, self.states |", "char == '(': paren_count += 1 elif char == ')':", "self.body_states = body_states self.start_state = start_state self.accept_state = accept_state self.states", "transition function is missing cases -- i.e., it is not", "if len(regex) == 1: return regex + '*' return f\"({regex})*\"", "== '*': machine_stack[-1] = machine_stack[-1].star() elif char in OPERATORS: if", "symbol)] = set() union_accept_states = new_self.accept_states | new_other.accept_states return NFA(", "state and a symbol is in the domain of the", "_good_range(self) -> None: bad_range = { x for x in", "return not current_states & self.accept_states == set() def determinize(self) ->", "'Ø'. The parentheses, vertical bar and star mean what you'd", "be the language recognized by dfa2. 
`dfa1 | dfa2` returns", "cannot contain characters {}.\" ) def fit_empty(empty: Regex) -> NFA:", "State], Regex] class _GNFA: def __init__( self, transition_function: GnfaTransitionFunction, body_states:", "shunting yard algorithm to parse the regex and build the", "combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf def _good_range(self) -> None: bad_range", "pair: set() for pair in product({'q1'}, alphabet) } accept_states =", "cases -- i.e., it is not the case that every", "than the standard python regular expresssions. All characters are intepreted", "in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')] = {new_other.start_state} return", "other.accept_states)) ) } return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def", "cardinality of the state-set of nfa2 plus 1. There is", "dfa's state-set and alphabet. The class will raise a ValueError", "are required to specify that the successor set for a", "your letter is so very close to the empty-set symbol.", "epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set) def _transition(self,", "a dfa that recognizes A union B. The states of", "regex1 = f'({regex1})' if union_main_scope(regex2): regex2 = f'({regex2})' return regex1", "transition function; * a member of the alphabet inferred from", "every pair of a state and a symbol is in", "ValueError(f\"Regex cannot start with '{first_char}'.\") processed = '' paren_count =", "= dfa1.states | {error_state} for symbol in union_alphabet: new_tf[(error_state, symbol)]", "> 1: binary_operate() return machine_stack.pop() OPERATORS = ['sentinel', '|', '•',", "strings). 
The keys of the `transition_function` implicitly define the dfa's", "State ): self.transition_function = transition_function self.body_states = body_states self.start_state =", "of nfa1 plus the cardinality of the state-set of nfa2", "# powerset code an itertools recipe, from # https://docs.python.org/3/library/itertools.html#recipes #", "set of strings of the form a concat b, where", ") def _combine(self, other: \"NFA\") -> Tuple[\"NFA\", \"NFA\", MutableNfaTF]: def", "the empty string in place of an alphabet symbol in", "symbol)] return current_state in self.accept_states def encode(self) -> Regex: \"\"\"", "AbstractSet[State], symbol: Symbol): return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol)) def accepts(self, string: str)", "a is an element of A and b is an", "| new_other.states) union_tf[(union_start_state, '')] = { new_self.start_state, new_other.start_state } for", "(state1, state2) in gnfa_tf.keys(): gnfa_tf[(state1, state2)] += '|' + symbol", "standard ASCII letters and digits, and most common punctuation and", "not a one-character string; 4. 
a member of the transition", "Takes three keyword arguments: - `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]] -", "'|': NFA.__or__, '•': NFA.__add__ } _error_message( bad_set=set(NOT_SYMBOLS) & alphabet, message_singular=\"Alphabet", "-> NFA: tf: NfaTransitionFunction = { pair: set() for pair", "__init__( self, *, transition_function: FsaTransitionFunction, start_state: State, accept_states: AbstractSet[State] ):", "A concat B -- i.e., the language consisting of the", "return self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State] = frozenset() # This", "raise a ValueError exception if any of the following conditions", "to specify that the successor set for a given state-symbol", "vertical bar, the star symbol, and the tilde, for reasons", "gnfa_tf: MutableGnfaTF = {} for state1, symbol in self.transition_function.keys(): state2", "and `Ø`, 2. the input regex string contains a character", "counter += 1 new_state = \"new_state\" + str(counter) return new_state", "= dfa1.states extra_symbols = dfa2.alphabet - dfa1.alphabet if extra_symbols: error_state", "= _stringify( self._transition(state_set, symbol) ) if set(state_set) & self.accept_states: determinized_accept.add(determinzed_state)", "more complicated than necessary; maybe I'll figure out how to", "exception on instantiation if any of the following are true:", "regular expresssions. All characters are intepreted as literals for symbols", "with the input NFAs having different alphabets. \"\"\" new_self, new_other,", "strings in A with strings in B. This DFA operator", "class NFA(_FSA): \"\"\" A nondeterministic finite automaton class. Takes three", "be the language recognized by nfa2. 
`nfa1 + nfa2` returns", "DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def star(self) -> \"NFA\": \"\"\"", "FsaTransitionFunction = Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]] ] class _FSA(_Base):", "the language recognized by dfa2. `dfa1 | dfa2` returns a", "set.union( *self.transition_function.values() ) _error_message( bad_set=transition_range - self.states, message_singular=(\"State {} in", "set() if empty == 'Ø' else {'q1'} return NFA( transition_function=tf,", "} accept_states = set() if empty == 'Ø' else {'q1'}", "transition function; * the set of accept states is not", "for symbol in string: current_state = self.transition_function[(current_state, symbol)] return current_state", "NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state, sym), frozenset()) empty: FrozenSet[State] =", "self.accept_state = accept_state self.states = ( self.body_states | {self.start_state} |", "| epsilon_neighbours epsilon_neighbours = self._get_successors( state_set=epsilon_neighbours, symbol='' ) return frozenset(state_set)", "extra_symbols = nfa2.alphabet - nfa1.alphabet if extra_symbols: for pair in", "have properly matching parentheses. \"\"\" operator_to_operation = { '|': NFA.__or__,", "`True` if my_dfa accepts \"some string\", and `False` otherwise. Will", "== '(': paren_count += 1 if char == ')': paren_count", "-> _GNFA: gnfa_tf: MutableGnfaTF = {} for state1, symbol in", "the above, the characters '(', ')', '|', '*', '•', '€'", "= body_states self.start_state = start_state self.accept_state = accept_state self.states =", "= list(iterable) return { frozenset(item) for item in chain.from_iterable( combinations(s,", "one-char strings). 
The keys of the `transition_function` implicitly define the", "state_sets = powerset(self.states) determinized_tf = {} determinized_accept = set() for", "ValueError exception on instantiation if any of th following are", "is string.printable -- i.e., the set of \"printable\" characters, which", "'')] = { new_self.start_state, new_other.start_state } for symbol in new_self.alphabet", "compare(char) <= 0 ): binary_operate() operator_stack.append(char) elif char == '(':", "self._alphabet @property def accept_states(self) -> AbstractSet[State]: return self._accept_states def _well_defined(self)", "| new_other.alphabet: union_tf[(union_start_state, symbol)] = set() union_accept_states = new_self.accept_states |", "of the power-set of the set of NFA states. For", "'new_state1' while new_state in state_set: counter += 1 new_state =", "Union[State, AbstractSet[State]] ] class _FSA(_Base): def __init__( self, *, transition_function:", "{} for state1 in self.states - {self.accept_state, rip}: r1 =", "not cool.\" ) if char == '(': paren_count += 1", "operator_stack.append(char) else: while operator_stack[-1] != '(': binary_operate() operator_stack.pop() while len(operator_stack)", "and B be the language recognized by nfa2. `nfa1 |", "alphabet except for '(', '')', '|', '*', '•', '€' and", "vertical bar and star mean what you'd expect them to", "0 for char in regex: if char in alphabet or", "return f\"({regex})*\" def regex_concat(regex1: Regex, regex2: Regex) -> Regex: if", "empty language with it. 
For reaons related to the above,", "Symbol], Set[State]] class NFA(_FSA): \"\"\" A nondeterministic finite automaton class.", "transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return", "states inferred from the transition function; * the range of", "rip}: r3 = self.transition_function[(rip, state2)] r4 = self.transition_function[(state1, state2)] new_regex", "r4 ) reduced_tf[(state1, state2)] = new_regex return _GNFA( reduced_tf, self.body_states", "= _stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set, symbol) ) if", "accept_states=star_accepts ) @staticmethod def fit( regex: Regex, alphabet: AbstractSet[Symbol] =", "accept_states=other.accept_states ) combination_tf = {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other,", "state in dfa1.states: new_tf[(state, symbol)] = error_state return new_states, new_tf", "and NFA public classes \"\"\" import collections.abc from itertools import", "the following conditions hold: 1. the alphabet contains any of", "no problem with the input DFAs having different alphabets. \"\"\"", "transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def star(self) -> \"NFA\": \"\"\" Let", "NfaTransitionFunction: new_tf = nfa1.transition_function extra_symbols = nfa2.alphabet - nfa1.alphabet if", "OPERATORS.index(operator) - OPERATORS.index(operator_stack[-1]) ) regex = _pre_process(regex, alphabet) for char", "in \" \"alphabet and not an accepted regex character.\" )", "recognized by nfa2. 
`nfa1 | nfa2` returns an nfa that", "state_set=state_set, symbol='' ) while epsilon_neighbours - state_set: state_set = state_set", "return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept) def _good_range(self) -> None: transition_range", "of strings in A with strings in B. This DFA", "not in {'(', '|'} else '' ) if char not", "= NFA( transition_function=self_tf, start_state=self.start_state, accept_states=self.accept_states ) new_other = NFA( transition_function=other_tf,", "state_set: AbstractSet[State]) -> FrozenSet[State]: epsilon_neighbours = self._get_successors( state_set=state_set, symbol='' )", "recognises the language defined by that regular expression and that", "except for '(', '')', '|', '*', '•', '€' and 'Ø'.", "empty: FrozenSet[State] = frozenset() # This avoids a mypy bug.", "obvious how to match the empty string in normal python", "inferred from the transition function; * a member of the", "* the transition function is missing a case -- i.e.,", "self.states = ( self.body_states | {self.start_state} | {self.accept_state} ) def", "None: bad_range = { x for x in self.transition_function.values() if", "Symbol], State] - `start_state`: State - `accept_state`: AbstractSet[State] (where States", "recognizes A* -- i.e., the set of all strings formed", ") while epsilon_neighbours - state_set: state_set = state_set | epsilon_neighbours", "= operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine) def compare(operator: Regex) ->", "not isinstance(x, collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value {} in the", "r) for r in range(len(s)+1) ) } state_sets = powerset(self.states)", "states/symbols are the source of the problem. \"\"\" def __or__(self,", "I will change the symbol for empty-set.) 
In the absence", "if paren_count > 0: raise ValueError( \"Left parenthesis occurs in", "_pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex: first_char = regex[0] if", "add_one_way(nfa2, nfa1) self_tf, other_tf = add_empty_transitions(self, other) new_self = NFA(", "You can define epsilon-moves by using the empty string in", "be the language recognised by nfa1, and B be the", "checks below will work as you would expect). The class", "of strings of the form a concat b, where a", "plus 1. There is no problem with the input NFAs", "keys implicitly define the nfa's state-set and alphabet; the first", "Regex) -> Regex: if regex1 == 'Ø' or regex2 ==", "of the problem. \"\"\" def __or__(self, other: \"DFA\") -> \"DFA\":", "the NFA operator; it converts the input DFAs into NFAs,", "key: {value} for key, value in self.transition_function.items() } return NFA(", "gnfa_tf[(state1, state2)] = symbol gnfa_start = _get_new_state(self.states) gnfa_accept = _get_new_state(self.states", "_good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) -> None: bad_accept_states =", "self.transition_function star_tf[(star_start, '')] = {self.start_state} for symbol in self.alphabet: star_tf[(star_start,", "used internally. '€' (option-shift-2) is used to match the empty", "the current syntax. 
(Quick challenge: it's not totally obvious how", "& alphabet, message_singular=\"Alphabet cannot contain character {}.\", message_plural=\"Alphabet cannot contain", "machine_stack.pop() machine = operator_to_operation[operator_stack.pop()]( left_operand, right_operand ) machine_stack.append(machine) def compare(operator:", "raise a ValueError exception on instantiation if any of th", "= self.start_state + other.start_state union_accept_states = { _stringify(item) for item", "_good_alphabet, _check_input ) State = str Symbol = str Regex", "while operator_stack[-1] != '(': binary_operate() operator_stack.pop() while len(operator_stack) > 1:", "'Ø' or regex2 == 'Ø': return 'Ø' if regex1 ==", "member of the alphabet (and hence the checks below will", "as you would expect). The class will raise a ValueError", "the last one; I am very against English chauvinism, but", "the empty language with it. For reaons related to the", "compare(char) > 0: operator_stack.append(char) else: while ( operator_stack[-1] not in", "body_states: Set[State], start_state: State, accept_state: State ): self.transition_function = transition_function", "'(': if len(processed) > 0: processed += ( '•' if", "rip}: r1 = self.transition_function[(state1, rip)] for state2 in self.states -", "_error_message( bad_set=bad_accept_states, message_singular=(\"Accept state {} is not a member of", "contain characters {}.\" ) def fit_empty(empty: Regex) -> NFA: tf:", "state in state_set] ) def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]:", "DFA(_FSA): \"\"\" A deterministic finite automaton class. Takes three keyword", "\"some string\", and `False` otherwise. 
Will raise a ValueError exception", "string: current_states = self._transition(current_states, symbol) return not current_states & self.accept_states", "nfa's states, and the second elements are the symbols in", "accept_states=self.accept_states ) new_other = NFA( transition_function=other_tf, start_state=other.start_state, accept_states=other.accept_states ) combination_tf", "_good_accept(self) -> None: bad_accept_states = self.accept_states - self.states _error_message( bad_set=bad_accept_states,", "Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple, Union, cast )", "symbols in the alphabet. The domain of the transition function", "which of these six conditions things triggered the exception, and", "not in PARENTHE and compare(char) <= 0 ): binary_operate() operator_stack.append(char)", "product(state_sets, self._alphabet): determinzed_state = _stringify(state_set) determinized_tf[(determinzed_state, symbol)] = _stringify( self._transition(state_set,", "`*`, then `•`, then `|`. This method uses a version", "str Regex = str FsaTransitionFunction = Mapping[ Tuple[State, Symbol], Union[State,", "`+` operation is not commutative. \"\"\" new_self, new_other, concat_tf =", "'Ø' (option-shift-o) represents the empty set; you can match to", "operator_stack[-1] != '(': binary_operate() operator_stack.pop() while len(operator_stack) > 1: binary_operate()", "`accept_state`: AbstractSet[State] (where States are strings and Symbols are one-char", "extra_symbols): new_tf[pair] = set() return new_tf return add_one_way(nfa1, nfa2), add_one_way(nfa2,", "contains symbols that aren't in the nfa's alphabet. \"\"\" _check_input(string=string,", "totally obvious how to match the empty string in normal", "of the alphabet (and hence the checks below will work", "alphabet) for char in regex: if char in EMPTIES: machine_stack.append(fit_empty(char))", "can be done; give it a go.) 
'Ø' (option-shift-o) represents", "state2), symbol in product(state_pairs, union_alphabet): union_transition_function[(state1 + state2, symbol)] =", "the fsa's state set.\") ) def accepts(self, string: str) ->", "transition_range = set(self.transition_function.values()) bad_range = transition_range - self.states _error_message( bad_set=bad_range,", "values of the transition function dictionary should be sets (or", "withour matching \" \"left parenthesis.\" ) processed += char if", "new_regex return _GNFA( reduced_tf, self.body_states - {rip}, self.start_state, self.accept_state )", "above conditions things triggered the exception, and which states/symbols are", "quite right -- the default value is string.printable *minus* parentheses,", "`my_dfa.accepts(\"some string\")` returns `True` if my_dfa accepts \"some string\", and", "give it a go.) 'Ø' (option-shift-o) represents the empty set;", "\"\"\" def __or__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let A", "transition function is missing a case -- i.e., it is", "= self.states & other.states while overlap: other = copy(other) overlap", "extra_symbols = dfa2.alphabet - dfa1.alphabet if extra_symbols: error_state = _get_new_state(dfa1.states)", "NFA( transition_function=concat_tf, start_state=new_self.start_state, accept_states=new_other.accept_states ) def _combine(self, other: \"NFA\") ->", "self.accept_states == set() def determinize(self) -> \"DFA\": \"\"\"Returns a DFA", "these above conditions things triggered the exception, and which states/symbols", "= {self.start_state} star_accepts = self.accept_states | {star_start} return NFA( transition_function=star_tf,", "= self._get_successors( state_set=state_set, symbol='' ) while epsilon_neighbours - state_set: state_set", "presently. As of now, the syntax of the regular expressions", "symbol='' ) while epsilon_neighbours - state_set: state_set = state_set |", "alphabet. 
The alphabet parameter is optional; it's default value is", "self._add_epsilons({self._start_state}) ) return DFA( transition_function=determinized_tf, start_state=determinized_start, accept_states=determinized_accept ) def star(self)", "char in alphabet or char == '(': if len(processed) >", "_get_new_state(new_self.states | new_other.states) union_tf[(union_start_state, '')] = { new_self.start_state, new_other.start_state }", "= self.transition_function[(current_state, symbol)] return current_state in self.accept_states def encode(self) ->", "of states inferred from the transition function; 2. the set", "problem. \"\"\" def __or__(self, other: \"NFA\") -> \"NFA\": \"\"\" Let", "star_tf = self.transition_function star_tf[(star_start, '')] = {self.start_state} for symbol in", "product({'q1'}, alphabet) } accept_states = set() if empty == 'Ø'", "state-symbol pair is the empty set, if it is. You", "and star mean what you'd expect them to mean if", "-> str: if not isinstance(states, collections.abc.Sequence): states = list(states) states.sort()", "recognised by dfa2. `dfa1 + dfa2` returns a DFA that", "nfa1, and B be the language recognized by nfa2. `nfa1", "a one-character string; 4. a member of the transition function's", "def _get_new_state(state_set: Container) -> State: counter = 1 new_state =", "from the transition function is not a one-character string; *", "against English chauvinism, but your letter is so very close", "or char == '(': if len(processed) > 0: processed +=", "in product({'q1', 'q2'}, alphabet) } tf[('q1', symbol)] = {'q2'} return", "state2) not in gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø' return _GNFA(gnfa_tf,", "any number of members of A. \"\"\" star_start = _get_new_state(self.states)", "union_tf[(union_start_state, '')] = { new_self.start_state, new_other.start_state } for symbol in", "collections.abc.Set) } _error_message( bad_set=bad_range, message_singular=(\"Value {} in the range of", "it. 
For reaons related to the above, the characters '(',", "a subset of the power set of states inferred from", "'|', '*', '•', '€' and 'Ø' cannot be symbols in", "in gnfa_tf: gnfa_tf[(state1, state2)] = 'Ø' return _GNFA(gnfa_tf, set(self.states), gnfa_start,", "empty string in place of an alphabet symbol in the", "= self.transition_function[(rip, rip)] reduced_tf = {} for state1 in self.states", "- `start_state`: State - `accept_state`: AbstractSet[State] (where States are strings", "method that takes a DFA instance and returns an NFA", "string (because it kind of looks like an epsilon); there's", "star_tf[(state, '')] = {self.start_state} star_accepts = self.accept_states | {star_start} return", "I recommend you don't `+` dfas with large numbers of", "Regex: \"\"\" Let A be the language accepted by dfa.", "regex without matching right \" \"parenthesis.\" ) return processed DfaTransitionFunction", "in PARENTHE and compare(char) <= 0 ): binary_operate() operator_stack.append(char) elif", "-> None: super()._well_defined() _good_alphabet(alphabet=self.alphabet, name=\"alphabet\") self._good_accept() self._good_domain(self.alphabet) def _good_accept(self) ->", "the first elements of the tuples represent the nfa's states,", "for state, symbol in nfa.transition_function.keys(): copy_tf[(prime(state), symbol)] = { prime(x)", "deterministic finite automaton class. Takes three keyword arguments: - `transition_function`:", "in chain.from_iterable( combinations(s, r) for r in range(len(s)+1) ) }", "recognizes A union B. The cardinality of the state-set of", "by nfa2. `nfa1 | nfa2` returns an nfa that recognizes", "alphabets. \"\"\" union_alphabet = self.alphabet | other.alphabet def maybe_add_state( dfa1:", "and b is an element of B. 
Note that this", "states inferred from the transition function; * a member of", "== '(': paren_count += 1 elif char == ')': paren_count", "on the NFA operator; it converts the input DFAs into", "else: gnfa_tf[(state1, state2)] = symbol gnfa_start = _get_new_state(self.states) gnfa_accept =", "== \"Ø\": return regex2 if regex2 == \"Ø\": return regex1", "class will raise a ValueError exception on instantiation if any", "= {} combination_tf.update(new_self.transition_function) combination_tf.update(new_other.transition_function) return new_self, new_other, combination_tf def _good_range(self)", "from # https://docs.python.org/3/library/itertools.html#recipes # (minor modification to make the return", "all concatenations of strings in A with strings in B.", "'') in concat_tf: concat_tf[(state, '')].add(new_other.start_state) else: concat_tf[(state, '')] = {new_other.start_state}", "= str FsaTransitionFunction = Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]] ]", "symbol. If, by some miracle, there is someone who cares", "matching \" \"left parenthesis.\" ) processed += char if paren_count", "return DFA( transition_function=union_transition_function, start_state=union_start_state, accept_states=union_accept_states ) def __add__(self, other: \"DFA\")", "simplicity, eventually. \"\"\" gnfa = self._gnfize() while len(gnfa.states) > 2:", "self._transition_function = cast( NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state, sym), frozenset())", "string that generates A. That regex string is liable to", "aren't in the DFA's alphabet. \"\"\" _check_input(string=string, alphabet=self.alphabet) current_state =", "accept_states=accept_states ) def fit_symbol(symbol: Symbol) -> NFA: tf: MutableNfaTF =", "range is not a set; 5. the range of the", "is usual; no need to write '•'' explicitly if you", "plus the cardinality of the state-set of nfa2 plus 1.", "of the transition function. 
The exception message will specify which", "by dfa2. `dfa1 | dfa2` returns a dfa that recognizes", "NFAs, uses the NFA '+', then converts the result back", "machine_stack[-1].star() elif char in OPERATORS: if operator_stack[-1] in PARENTHE or", "I'll figure out how to improve on average simplicity, eventually.", "'*', '•', '€' and 'Ø' cannot be symbols in the", "epsilon-moves by using the empty string in place of an", "new_state = 'new_state1' while new_state in state_set: counter += 1", "and an alphabet (i.e., a set of one-character strings) as", "in the alphabet, and not one of the above veboten", "GnfaTransitionFunction = Mapping[Tuple[State, State], Regex] MutableGnfaTF = MutableMapping[Tuple[State, State], Regex]", "can define epsilon-moves by using the empty string in place", "transition function's range is not a set; 5. the range", "in self.transition_function.items() } return NFA( transition_function=nd_transition_function, start_state=self.start_state, accept_states=self.accept_states ) def", "big NFAs. \"\"\" # powerset code an itertools recipe, from", "the nfa's state-set and alphabet; the first elements of the", "in regex: if char in EMPTIES: machine_stack.append(fit_empty(char)) elif char in", "str FsaTransitionFunction = Mapping[ Tuple[State, Symbol], Union[State, AbstractSet[State]] ] class", "{'|', '•'}: raise ValueError( \"Regex contains binary operator followed by", "define the dfa's state-set and alphabet. 
The class will raise", "= cast( NfaTransitionFunction, self._transition_function ) return self._transition_function.get((state, sym), frozenset()) empty:", "version of Dijkstra's shunting yard algorithm to parse the regex", "the transition \" \"function is not in the fsa's state", "gnfa_accept = _get_new_state(self.states | {gnfa_start}) gnfa_tf[(gnfa_start, self.start_state)] = '€' for", "i.e.,`(` , `)`, `|`, `*`, `•`, `€` and `Ø`, 2.", "len(regex) == 1: return regex + '*' return f\"({regex})*\" def", "are not sets.\") ) transition_range: Set[Optional[AbstractSet[State]]] = set.union( *self.transition_function.values() )", "')'] EMPTIES = ['€', 'Ø'] NOT_SYMBOLS = OPERATORS + PARENTHE", "as the NFA instance. WARNING: The set of DFA states", "string: str) -> bool: \"\"\" `my_dfa.accepts(\"some string\")` returns `True` if", "nfa1 plus the cardinality of the state-set of nfa2 plus", "symbol: Symbol ) -> FrozenSet[State]: def get_successor(state: State, sym: Symbol)", "= { key: {value} for key, value in self.transition_function.items() }" ]
[ "self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20), \"XX\") if __name__ == \"__main__\":", "Generator/tests.py #!/usr/bin/env python3 import unittest from roman_number_generator import arabic_to_roman class", "Number Generator/tests.py #!/usr/bin/env python3 import unittest from roman_number_generator import arabic_to_roman", "4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20), \"XX\") if __name__ ==", "unittest from roman_number_generator import arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError,", "#!/usr/bin/env python3 import unittest from roman_number_generator import arabic_to_roman class Test(unittest.TestCase):", "class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12),", "def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20),", "roman_number_generator import arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000)", "self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20), \"XX\") if", "\"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20), \"XX\") if __name__ == \"__main__\": unittest.main()", "_start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") 
self.assertEqual(arabic_to_roman(20), \"XX\")", "import unittest from roman_number_generator import arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self):", "python3 import unittest from roman_number_generator import arabic_to_roman class Test(unittest.TestCase): def", "from roman_number_generator import arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman,", "arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\") self.assertEqual(arabic_to_roman(20), \"XX\") if __name__", "import arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4),", "<reponame>fossabot/IdeaBag2-Solutions<filename>Numbers/Roman Number Generator/tests.py #!/usr/bin/env python3 import unittest from roman_number_generator import", "arabic_to_roman class Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\")", "Test(unittest.TestCase): def _start_arabic_to_roman(self): self.assertRaises(ValueError, arabic_to_roman, 4000) self.assertEqual(arabic_to_roman(4), \"IV\") self.assertEqual(arabic_to_roman(12), \"XII\")" ]
[ "working_dir): self.working_dir = working_dir def runAfterDownload(self, file_name, full_path, observation): raise", "import os class ModuleBase: def __init__(self, working_dir): self.working_dir = working_dir", "infrastructure.satnogClient import SatnogClient import os class ModuleBase: def __init__(self, working_dir):", "SatnogClient import os class ModuleBase: def __init__(self, working_dir): self.working_dir =", "self.working_dir = working_dir def runAfterDownload(self, file_name, full_path, observation): raise NotImplementedError()", "from infrastructure.satnogClient import SatnogClient import os class ModuleBase: def __init__(self,", "def __init__(self, working_dir): self.working_dir = working_dir def runAfterDownload(self, file_name, full_path,", "__init__(self, working_dir): self.working_dir = working_dir def runAfterDownload(self, file_name, full_path, observation):", "class ModuleBase: def __init__(self, working_dir): self.working_dir = working_dir def runAfterDownload(self,", "import SatnogClient import os class ModuleBase: def __init__(self, working_dir): self.working_dir", "os class ModuleBase: def __init__(self, working_dir): self.working_dir = working_dir def", "ModuleBase: def __init__(self, working_dir): self.working_dir = working_dir def runAfterDownload(self, file_name," ]
[ "collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points = [b.get_R() for b in", "ft < time_best or size > size_best: time_best = ft", "for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit for commit,ci,cj", "broadcast the transaction! {nice_msg}\") except TimeoutException: self.print_error(\"timed out while trying", "due to tags being full self.tags = defaultdict(TagStatus) # how", "the commitment list and note where each client's commitments ended", "enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx = client_commit_indexes[myindex][i]", "blame message: {e} (you claimed: {blame.blame_reason!r})') continue if isinstance(ret, str):", "(dropped {prev_client_count - len(self.clients)})\") total_excess_fees = sum(f for _,_,f in", "src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}') continue if src_client.dead: # If", "size = len(pool.pool) if size >= size_best: if time_best is", "for s in signatures if s is None]) ### self.print_error(f\"ending", "and we break the connection as a result. 
# Note", "in msg.tags: if len(tag.id) > 20: client.error(\"Tag id too long\")", "players at tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp = self.upnp) try:", "network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost)", "client): client_ip = client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if msg.version !=", "number of pool clients to trigger setting fill_time self.fill_time =", "for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex()", "(lokad) + 33 (session hash) ) if min_safe_clients * num_components", "len(self.pool) < self.fill_threshold: self.fill_time = None for t in client.tags:", "bad_inputs = set(i for i,sig in enumerate(signatures) if sig is", "+ '41' assert tx.is_complete() txid = tx.txid() self.print_error(\"completed the transaction!", "= 1000 # sats/kB max_excess_fee = 300000 # sats tiers", "signatures phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if", "fusion if the pool has stayed at or above min_clients", "all the other components were # just imposters who didn't", "= FusionController(self. 
network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost", "dead clients self.clients = [c for c in self.clients if", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS #", "dict() tfill_thresh = tnow - Params.start_time_max for t, pool in", "commitment_master_list if ci != myindex] N = len(possible_commitment_destinations) assert N", "Note that we could have aborted earlier but this #", "for client in moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for clients", "if all blames are checked, we # can start next", "ResultsCollector(live_clients, done_on_fail = False) def client_get_blames(client, myindex, proofs, collector): with", "# If all clients submitted largest possible component (uncompressed p2pkh", "trigger setting fill_time self.fill_time = None # when did pool", "the Software, # and to permit persons to whom the", "to # broadcast a malleated version by re-signing one of", "# Note that we could have aborted earlier but this", "or start_ev.is_set(): return tnow = time.monotonic() # scan through tiers", "self.clients = [c for c, _, _ in results] self.check_client_count()", "len(all_commitments) - Params.num_components # calculate the randomly chosen destinations, same", "sent at {time.time()}; accepting covert components\") # Await commitment messages", "+ Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime > 0, \"timings set", "round right away. 
collector.add(None) for idx, (client, proofs) in enumerate(zip(self.clients,", "results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients", "detected\") # If exactly one of the inputs is signed,", "client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected client.\"\"\"", "0 class WaitingPool: \"\"\" a waiting pool for a specific", "collector.add((client, relays)): client.error(\"late proofs\") for client in self.clients: client.addjob(client_get_proofs, collector)", "self.clients = [c for c in self.clients if not c.dead]", "msg.tiers} except KeyError: if self.stopping: return client.error(f\"Invalid tier selected: {t}\")", "removed; on stop we don't care.) with self.lock: for t,", "where each client's commitments ended up client_commit_indexes = [[None]*Params.num_components for", "- To signal the end of covert components phase, owner", "inputs (through matching the prevout and claimed pubkey). prevout_spenders =", "client.error('component submitted at wrong time') else: assert mtype == 'signature'", "= self.components del self.components return ret def start_signatures(self, sighashes, pubkeys):", "client.got_submit = False while True: msg, mtype = client.recv('component', 'signature',", "copy of this software and associated documentation files # (the", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "ARISING FROM, OUT OF OR IN # CONNECTION WITH THE", "(for privacy)? 
min_safe_clients = 6 # Choose the minimum excess", "12) // (num_components * 173) # Every round, clients leave", "LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A", "except AttributeError: pass def new_client_job(self, client): client.got_submit = False while", "= check_covert_component(msg, round_pubkey, feerate) with self.lock: try: self.components[msg.component] = (sort_key,", "round_pubkey, feerate) with self.lock: try: self.components[msg.component] = (sort_key, contrib) except", "def __enter__(self, ): return self def __exit__(self, exc_type, exc_value, traceback):", "gather(self, *, deadline): remtime = deadline - time.monotonic() self.done_ev.wait(max(0., remtime))", "+ len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len: c.error('duplicate component commitment')", "pool(s) with the most number of players, - Choose the", "6 # Choose the minimum excess fee based on dividing", "100 # For a given IP, how many players can", "Sleep until end of covert components phase remtime = covert_T0", "those clients from all pools for t, pool in self.waiting_pools.items():", "purpose. \"\"\" import secrets import sys import threading import time", "23 component_feerate = 1000 # sats/kB max_excess_fee = 300000 #", "self.pool.add(client) for t in client.tags: ts = self.tags[t] ts.pool +=", "sig except AttributeError: client.error('signature submitted at wrong time') client.send_ok() client.got_submit", "True, txsignatures = signatures)) return True self.sendall(pb.FusionResult(ok = False, bad_components", "independently of waiting server. # self.spawned_clients.difference_update(chosen_clients) # Kick off the", "calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components) #TODO : Check the inputs", "- make these configurable class Params: num_components = 23 component_feerate", "the other components were # just imposters who didn't have", "proofs according to destination. 
proofs_to_relay = [list() for _ in", "= announcehost self.donation_address = donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags)", "bindhost, upnp = None, announcehost = None): super().__init__(name=\"FusionController\") self.network =", "def __init__(self, num_results, done_on_fail = True): self.num_results = int(num_results) self.done_on_fail", "to choose their pool. msg = client.recv('joinpools', timeout=120) if len(msg.tiers)", "good connection to the EC server. Report this back to", "start_signatures(self, sighashes, pubkeys): num_inputs = len(sighashes) assert num_inputs == len(pubkeys)", "if client_ip.startswith('127.'): # localhost is whitelisted to allow unlimited access", "to players; this # will form the basis of our", "else: self._add_pool(client) moved.append(client) for client in moved: self.queue.remove(client) class FusionServer(GenericServer):", "= self.upnp) try: annhost = covert_server.host if self.announcehost is None", "when did pool exceed fill_threshold self.tag_max = tag_max # how", "the rest of this function might run for a while", "1.5, 2.2, 3.3, 4.7, 6.8] E12 = [1.0, 1.2, 1.5,", "are somewhat subjective. It would be # appropriate to add", "# attempt to move clients from queue into pool moved", "results] self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)} clients (dropped {prev_client_count -", "!= existing_sig: if not schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad transaction", "How many clients can share same tag on a given", "we could have aborted earlier but this # way third", "float('inf') while True: with self.lock: if self.stopping or start_ev.is_set(): return", "# missing. 
However, if the client declares the genesis_hash, we", "msg), timeout=5) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}')", "c in self.spawned_clients: c.got_submit = False def end_signatures(self): with self.lock:", "to add some 'ban score' to the player. # we", "max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best = tier_best def", "if len(msg.tiers) == 0: client.error(\"No tiers\") if len(msg.tags) > 5:", "i, (commit, ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector", "\"E series\" values -- round numbers that are almost geometrically", "if ts is not None and ts.all_ >= self.tag_max: return", "len(self.fails) + len(self.results) >= self.num_results: self.done_ev.set() return True class FusionController(threading.Thread,", "inputs against blockchain. if len(msg.blames) > len(proofs): client.error('too many blames')", "many # checks against blockchain need to be done, perhaps", "reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})')", "# allowed and we break the connection as a result.", "indicates misconfiguration since fusion server ought # to have a", "number of proofs\") if any(len(p) > 200 for p in", "ci, cj) for ci, (_, commitments, _) in enumerate(results) for", "time_best or size > size_best: time_best = ft tier_best =", "all_components, session_hash = session_hash)) # Sleep until end of covert", "= tier_best def start_fuse(self, tier): \"\"\" Immediately launch Fusion at", "other components were # just imposters who didn't have private", "secrets import sys import threading import time import traceback from", "associated documentation files # (the \"Software\"), to deal in the", "component submission? 
COVERT_CLIENT_TIMEOUT = 40 # used for non-cryptographic purposes", "raise else: self.print_error(\"broadcast was successful!\") # Give our transaction a", "if ts.pool >= t.maxsimul: can_pool = False if can_pool: self._add_pool(client)", "set() # clients who will be put into fusion round", "# and we are mainnet, etc. client.error(\"This server is on", "waiting pools for pool in mytierpools.values(): res = pool.check_add(client) if", "ValidationError('signature length is wrong') # It might be we already", "Choose the pool with the earliest fill time; - If", "can start at a special time remtime = min(remtime, self.tier_best_starttime", "remtime < 0: # really shouldn't happen, we had plenty", "time.monotonic() if remtime < 0: # really shouldn't happen, we", "in the smallest fusion # (these overhead numbers assume op_return", "remtime = pool.fill_time - tfill_thresh if t == self.tier_best: #", "#TODO : Check the inputs and outputs to see if", "phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime", "collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now, repackage the proofs", "src_client)) live_clients = len(results) collector = ResultsCollector(live_clients, done_on_fail = False)", "covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True)) else:", "self.t_last_fuse = time.monotonic() self.reset_timer() # Uncomment the following to: Remove", "donation_address = '' if isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello(", "= msg.txsignature if len(sig) != 64: raise ValidationError('signature length is", "persons to whom the Software is furnished to do so,", "# Filter clients who didn't manage to give a good", "start a fusion. 
New clients get a ClientThread made for", "or exception while True: covert_server.reset() # Clean up dead clients", "nice_msg, = e.args server_msg = e.server_msg self.print_error(f\"could not broadcast the", "Exception as e: self.print_error(f\"player indicated bad input but checking failed", "AttributeError: client.error('component submitted at wrong time') sort_key, contrib = check_covert_component(msg,", "in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit for commit,ci,cj in commitment_master_list)", "pool is full). start_time_min = 400 # whether to print", "wrong') # It might be we already have this signature.", "60 else: # the smallest fusion will use 1-byte varint", "for a specific tier \"\"\" def __init__(self, fill_threshold, tag_max): self.pool", "oregano.util import PrintError, ServerError, TimeoutException from . import fusion_pb2 as", "8.2, 9.1] # TODO - make these configurable class Params:", "note this needs to consider the maximum interval between messages:", "is None or ft < time_best or size > size_best:", "continue status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients) remtime", "= None tier_best = None size_best = 0 for t,", "self.clients if not c.dead] self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended successfully!')", "returns a dict of {component: contrib}, where contrib is (+-", "new round, call .reset(); to kill all connections, call .stop().", "if ts.all_ == 0: # cleanup for no-longer-used tags del", "slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs = len([s for s", "immediately since client may be trying to DoS us by", "self.round_pubkey del self.components del self.feerate except AttributeError: pass try: del", "3.3, 3.9, 4.7, 5.6, 6.8, 8.2] E24 = [1.0, 1.1,", "len(proofs): client.error('too many blames') if len(set(blame.which_proof for blame in msg.blames))", "annport = 
covert_server.port covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert server started", "in client.tags: ts = self.tags[t] if ts.pool >= t.maxsimul: break", "self._add_pool(client) else: self.queue.append(client) return can_pool def remove(self, client): # make", "input: ' + reason) continue except Exception as e: self.print_error(f\"player", "[(ci,cj) for commit, ci, cj in commitment_master_list if ci !=", "self.stopping: return donation_address = '' if isinstance(self.donation_address, Address): donation_address =", "(all commitments, but leaving out the originating client's commitments). myindex", "can start next round right away. collector.add(None) for idx, (client,", "t, pool in self.waiting_pools.items(): ft = pool.fill_time if ft is", "round_pubkey, feerate): self.components = dict() self.feerate = feerate self.round_pubkey =", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "on failure (on success, we are already removed; on stop", "min_clients for this long. start_time_max = 1200 # Inter-fusion delay", "except IndexError: client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}') continue src_commit_blob,", "the randomly chosen destinations, same way as client did. relays", "covert components phase, call start_components. - To signal the end", "of covert components phase, owner calls end_components, which returns a", "standard tx size limitation? max_clients = (100000 - 12) //", "# checks against blockchain need to be done, perhaps even", "__slots__ = () def __new__(cls, ipstr, tagbytes, maxsimul): ipb =", "c.error(\"late commitment\") # record for later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment", "set(i for i,sig in enumerate(signatures) if sig is None) #", "and note where each client's commitments ended up client_commit_indexes =", "we aren't collecting any results, rather just marking that #", "bind to an ephemeral port. 
- Before start of covert", "contrib = check_covert_component(msg, round_pubkey, feerate) with self.lock: try: self.components[msg.component] =", "per connected client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype =", "= annhost_b, covert_port = annport, covert_ssl = False, server_time =", "proof is malicious. Boot client # immediately since client may", "out of range\") ip = '' if tag.no_ip else client_ip", "# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "= 8 # If all clients submitted largest possible component", "or size > size_best: time_best = ft tier_best = t", "= pool.fill_time if ft is None: continue size = len(pool.pool)", "= msg.random_number if sha256(seed) != client.random_number_commitment: client.error(\"seed did not match", "class CovertServer(GenericServer): \"\"\" Server for covert submissions. How it works:", "the rights to use, copy, modify, merge, # publish, distribute,", "this is the favoured pool, can start at a special", "ret def reset(self): try: del self.round_pubkey del self.components del self.feerate", "# blockchain checks are somewhat subjective. It would be #", "client.error(\"No tiers\") if len(msg.tags) > 5: client.error(\"Too many tags\") #", "for either inputs or outputs lists overhead = 60 else:", "fusion. 
New clients get a ClientThread made for them, and", "for c in self.clients if not c.dead] self.check_client_count() if self.run_round(covert_server):", "time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now, repackage the proofs according to", "covert_ssl = False, server_time = begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b,", "resubmission after ack failed delivery, # but we don't allow", "False if len(all_components) != len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem detected:", "= validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate) except ValidationError", "FusionServer(GenericServer): \"\"\"Server for clients waiting to start a fusion. New", "in self.queue: for t in client.tags: ts = self.tags[t] if", "# appropriate to add some 'ban score' to the player.", "fusion = FusionController(self. network, tier, chosen_clients, self.bindhost, upnp = self.upnp,", "component_master_list] del component_master_list # Do some preliminary checks to see", "= dict() tfill_thresh = tnow - Params.start_time_max for t, pool", "client.addjob(clientjob_send, msg, timeout) def check_client_count(self,): live = [c for c", "range\") ip = '' if tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id,", "# generate blind nonces (slow!) for c in self.clients: c.blinds", "of waiting server. # self.spawned_clients.difference_update(chosen_clients) # Kick off the fusion.", "the fusion. rng.shuffle(chosen_clients) fusion = FusionController(self. network, tier, chosen_clients, self.bindhost,", "we don't care.) 
with self.lock: for t, pool in mytierpools.items():", "ts.all_ += 1 if ts.pool >= t.maxsimul: can_pool = False", "client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if not collector.add((client, relays)): client.error(\"late", "x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z, _", "genesis hash declared by client, we'll let them slide...\") if", "(Address, type(None))) if not schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion", "# signing phase and go directly to blame, or maybe", "but this # way third parties can't abuse us to", "pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection,", "blame phase. bad components: {bad_components}\") if len(self.clients) < 2: #", "pool exceed fill_threshold self.tag_max = tag_max # how many clients", "- Before start of covert signatures phase, owner calls start_signatures.", "= None): assert network assert isinstance(donation_address, (Address, type(None))) if not", "stay open without activity. 
# note this needs to consider", "client): self.pool.add(client) for t in client.tags: ts = self.tags[t] ts.pool", "- Params.start_time_max for t, pool in mytierpools.items(): if client not", "\"Software\"), to deal in the Software without restriction, # including", "as schnorr from oregano.address import Address from oregano.util import PrintError,", "max_excess_fee = Params.max_excess_fee, tiers = Params.tiers, donation_address = donation_address ))", "= False) def client_get_blames(client, myindex, proofs, collector): with collector: #", "self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True)) else: self.print_error(\"starting covert signature", "abuse us to find out the # timing of a", "nothing after this point can report back to the #", "But don't start a fusion if it has only been", "msg.genesis_hash is optional and we tolerate it # missing. However,", "be put into fusion round if started at this tier", "10000000, 100000000] for s in E12] # How many clients", "the earliest fill time; - If no pools are filled", "not None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif len(self.fails) + len(getattr(self,", "src_commitment_idx, dest_client_idx, dest_key_idx)) if not collector.add((client, relays)): client.error(\"late proofs\") for", "with the timeout but that's OK. 
self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES)", "= time.monotonic() def add(self, client): can_pool = True for t", "tag (in pool and queue) def check_add(self, client): for t", "moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for clients waiting to start", "msg.version != Protocol.VERSION: client.error(\"Mismatched protocol version, please upgrade\") if msg.genesis_hash:", "in Params.tiers} self.t_last_fuse = time.monotonic() # when the last fuse", "import secrets import sys import threading import time import traceback", "= sighashes self.pubkeys = pubkeys for c in self.spawned_clients: c.got_submit", "(unless pool is full). start_time_min = 400 # whether to", "we tolerate it # missing. However, if the client declares", "# record for later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment", "list, but remember exactly where each commitment originated. 
commitment_master_list =", "lock: expected_len = len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) !=", "call try_move_from_queue() after calling this try: self.pool.remove(client) except KeyError: in_pool", "start_time_max = 1200 # Inter-fusion delay -- after starting any", "at this tier self.queue = list() # clients who are", "signature acceptance\") tx, input_indices = tx_from_components(all_components, session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i,", "made for them, and they are put into the waiting", "ClientThreads are passed over to a FusionController to run the", "shouldn't happen, we had plenty of time raise FusionError(\"way too", "successfully!') except FusionError as e: self.print_error(f\"Ended with error: {e}\") except", "client.recv('joinpools', timeout=120) if len(msg.tiers) == 0: client.error(\"No tiers\") if len(msg.tags)", "ts.pool += 1 if len(self.pool) == self.fill_threshold: self.fill_time = time.monotonic()", ">= Params.max_clients: # pool filled up to the maximum size,", "clients who are waiting due to tags being full self.tags", "fuse. (since fill time is a float, this will almost", "= ResultsCollector(len(self.clients), done_on_fail = False) def client_get_proofs(client, collector): with collector:", "- how long from one round's component submission to the", "tags\") # Event for signalling us that a pool started.", "clients self.clients = [c for c in self.clients if not", "x:x[1][0]) all_components = [comp for comp, (sort_key, contrib) in component_master_list]", "total_excess_fees != sum(component_contribs): skip_signatures = True self.print_error(\"problem detected: excess fee", "just imposters who didn't have private key. 
If more than", "If the blamed client is already dead, don't waste more", "raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected client.\"\"\" def", "8 # If all clients submitted largest possible component (uncompressed", "clients waiting to start a fusion. New clients get a", "c.dead] self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended successfully!') except FusionError as", "# Sleep until end of covert components phase remtime =", "round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except: #", "try: ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate)", "to the # verifier, there is no privacy leak by", "mytierpools.values(): res = pool.check_add(client) if res is not None: client.error(res)", "client.recv('myproofslist') seed = msg.random_number if sha256(seed) != client.random_number_commitment: client.error(\"seed did", "script size of 1 + 5 (lokad) + 33 (session", "require 3-byte varint for either inputs or outputs lists overhead", "annport, covert_ssl = False, server_time = begin_time)) self.last_hash = calc_initial_hash(self.tier,", "commit_messages) with lock: expected_len = len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if", "the genesis_hash, we # do indeed disallow them connecting if", "pool, we don't have bias towards any particular tier with", "no pools are filled then there is no favoured fuse.", "missing_sigs = len([s for s in signatures if s is", "the originator, however # blockchain checks are somewhat subjective. 
It", "if isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components,", "timeout=timeout) def clientjob_goodbye(client, text): # a gentler goodbye than killing", "ci != myindex] N = len(possible_commitment_destinations) assert N == len(all_commitments)", "servers\") else: client.print_error(\"👀 No genesis hash declared by client, we'll", "have this signature. This is fine # since it might", "in client.tags: ts = self.tags.get(t) if ts is not None", "client.error(\"too-long proof\") # they should only be 129 bytes long.", "libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp = upnp) self.config = config", "send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def send_error(self, msg): self.send(pb.Error(message = msg),", "client from waiting pools on failure (on success, we are", "this IP cannot be present in too many fuses. client.tags", "waiting due to tags being full self.tags = defaultdict(TagStatus) #", "bad_components = sorted(bad_components))) ### self.print_error(f\"entering blame phase. bad components: {bad_components}\")", "how long from first connection to last possible Tor component", "startup time. self.reset_timer() def run(self): try: super().run() finally: self.waiting_pools.clear() #", "for m in commit_messages) with lock: expected_len = len(seen_salthashes) +", "random rng = random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout =", "{time.time()}; accepting covert components\") # Await commitment messages then process", "covert signatures phase, owner calls start_signatures. - To signal the", "did not match commitment\") proofs = msg.encrypted_proofs if len(proofs) !=", "ts.all_ == 0: # cleanup for no-longer-used tags del self.tags[t]", "best pool, so it might not be best anymore. 
self.reset_timer()", "idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector)", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #", "def send_error(self, msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg):", "changed the favoured tier self.reset_timer() inftime = float('inf') while True:", "network self.tier = tier self.clients = list(clients) self.bindhost = bindhost", "privacy with what we have. bad_components = set() ### if", "> len(proofs): client.error('too many blames') if len(set(blame.which_proof for blame in", "if many # checks against blockchain need to be done,", "pools for t, pool in self.waiting_pools.items(): for c in chosen_clients:", "send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(),", "for _co in range(Params.num_components)] lock = threading.Lock() seen_salthashes = set()", "timeout for clients to choose their pool. msg = client.recv('joinpools',", "tier, clients, bindhost, upnp = None, announcehost = None): super().__init__(name=\"FusionController\")", "destinations list (all commitments, but leaving out the originating client's", "oregano.address import Address from oregano.util import PrintError, ServerError, TimeoutException from", "client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT) if mtype == 'ping':", "{annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain = annhost_b,", "pool.fill_time - tfill_thresh if t == self.tier_best: # this is", "self.print_error(f\"could not broadcast the transaction! 
{nice_msg}\") except TimeoutException: self.print_error(\"timed out", "start_components(self, round_pubkey, feerate): self.components = dict() self.feerate = feerate self.round_pubkey", "bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for i in bad_inputs) else: for", "tag_max # how many clients can share same tag (in", "failure (on success, we are already removed; on stop we", "pool filled up to the maximum size, so start immediately", "the most number of players, - Choose the pool with", "= msg), timeout=5) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client:", "'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT) if mtype == 'ping': continue", "= session_hash)) # Sleep until end of covert signatures phase", "port, CovertClientThread, upnp = upnp) self.round_pubkey = None def start_components(self,", "allow one submission per connection # per phase. client.error('multiple submission", "duplicated inputs (through matching the prevout and claimed pubkey). prevout_spenders", "originator, however # blockchain checks are somewhat subjective. It would", "threading.Event() client.start_ev = start_ev if client_ip.startswith('127.'): # localhost is whitelisted", "in the Software without restriction, # including without limitation the", "c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash", "for i,sig in enumerate(signatures) if sig is None) # further,", "most number of players, - Choose the pool with the", "= client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if msg.version != Protocol.VERSION: client.error(\"Mismatched", "reset(self): try: del self.round_pubkey del self.components del self.feerate except AttributeError:", "deadline - time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock: ret = self.results", "version by re-signing one of their inputs. 
time.sleep(2) self.sendall(pb.FusionResult(ok =", "commitment idx removes ordering correlations about which client sent which", "earliest fill time; - If no pools are filled then", "# the smallest fusion will use 1-byte varint for both", "failed with exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated bad input", "lists overhead = 62 elif min_safe_clients * num_components >= 0xfc:", "# We allow a long timeout for clients to choose", "CovertServer(GenericServer): \"\"\" Server for covert submissions. How it works: -", "permit persons to whom the Software is furnished to do", "anymore. self.reset_timer() raise class ResultsCollector: # Collect submissions from different", ".validation import (check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx) # Resistor \"E", "start to accept covert components covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind", "bytes long. # generate the possible destinations list (all commitments,", "following to: Remove from spawned clients list, so that the", "[ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z, _ in proofs])) msg", "fusion # (these overhead numbers assume op_return script size of", "/ {len(proofs)}') continue src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob =", "self.sighashes del self.pubkeys except AttributeError: pass def new_client_job(self, client): client.got_submit", "c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)] lock = threading.Lock()", "-= 1 if ts.all_ == 0: # cleanup for no-longer-used", "to whom the Software is furnished to do so, #", "can_pool = True for t in client.tags: ts = self.tags[t]", "making us check many inputs against blockchain. 
if len(msg.blames) >", "mtype == 'ping': continue if client.got_submit: # We got a", "round_time = round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail = False) def", "bad input but checking failed with exception {repr(e)} ({outpoint})\") else:", "num_components = Params.num_components, component_feerate = Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee", "in chosen_clients: c.start_ev.set() # Remove those clients from all pools", "sats/kB max_excess_fee = 300000 # sats tiers = [round(b*s) for", "True self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components))) ### self.print_error(f\"entering blame", "client to waiting pools for pool in mytierpools.values(): res =", "didn't manage to give a good commitment. prev_client_count = len(self.clients)", "fusion? min_clients = 8 # If all clients submitted largest", "let them slide...\") if self.stopping: return donation_address = '' if", "# SOFTWARE. \"\"\" A basic server implementation for CashFusion. Does", "we're a bit generous with the timeout but that's OK.", "- Before start of covert components phase, call start_components. -", "# generate the possible destinations list (all commitments, but leaving", "uploading commitments, as clients are doing this. remtime = covert_T0", "= round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain = annhost_b, covert_port =", "who are waiting due to tags being full self.tags =", "components phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert", "to join, reject) max_tier_client_tags = 100 # For a given", "ordering correlations about which client sent which proof proofs.sort(key =", "signatures). - To reset the server for a new round,", "gen_keypair() round_pubkey = covert_Cpub # start to accept covert components", "new phase started. As # an anti-spam measure we only", "on stop we don't care.) 
with self.lock: for t, pool", "in mytiers: pool = mytierpools[t] pool.add(client) if len(pool.pool) >= Params.max_clients:", "from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})\") total_excess_fees = sum(f", "covert signature acceptance. {missing_sigs} missing :{'(' if missing_sigs else ')'}\")", "including without limitation the rights to use, copy, modify, merge,", "fill_threshold self.tag_max = tag_max # how many clients can share", "may run an SSL server proxy such as nginx for", "network self.announcehost = announcehost self.donation_address = donation_address self.waiting_pools = {t:", "in c.blinds], server_time = round_time )) msg = c.recv('playercommit') commit_messages", "if mtype == 'ping': continue if client.got_submit: # We got", "= t size_best = size if time_best is None: self.tier_best_starttime", "= 62 elif min_safe_clients * num_components >= 0xfc: # the", "collector): with collector: msg = client.recv('myproofslist') seed = msg.random_number if", "self.reset_timer() # Uncomment the following to: Remove from spawned clients", "clients are doing this. remtime = covert_T0 + Protocol.T_START_COMPS -", "raise FusionError(f'Rejected client: {msg}') class ClientTag(bytes): \"\"\" enhanced bytes object", "in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys)", "for _ in self.clients] for src_client, relays in results: for", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "We got a second submission before a new phase started.", "# clients who are waiting due to tags being full", "have aborted earlier but this # way third parties can't", "share same tag (in pool and queue) def check_add(self, client):", "has exited. 
For this reason we try to # not", "# we aren't collecting any results, rather just marking that", "# Permission is hereby granted, free of charge, to any", "fusion with {len(self.clients)} players at tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp", "self.tier_best = tier_best def start_fuse(self, tier): \"\"\" Immediately launch Fusion", "enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders in prevout_spenders.items(): if len(spenders)", "check many inputs against blockchain. if len(msg.blames) > len(proofs): client.error('too", "server side. \"\"\" def __init__(self, network, tier, clients, bindhost, upnp", "# repeatedly run rounds until successful or exception while True:", "= True self.print_error(\"problem detected: too few components submitted\") if total_excess_fees", "if client not in pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players =", "if len(self.fails) + len(self.results) >= self.num_results: self.done_ev.set() return True class", "that we sent 'startround' message to players; this # will", "in component_master_list] del component_master_list # Do some preliminary checks to", "continue if src_client.dead: # If the blamed client is already", "= False def end_components(self): with self.lock: ret = self.components del", "import PrintError, ServerError, TimeoutException from . 
import fusion_pb2 as pb", "None: self.tier_best_starttime = None else: self.tier_best_starttime = max(time_best + Params.start_time_min,", "covert_T0 = time.monotonic() self.print_error(f\"startround sent at {time.time()}; accepting covert components\")", "bad components: {bad_components}\") if len(self.clients) < 2: # Sanity check", "self.reset_timer() def run(self): try: super().run() finally: self.waiting_pools.clear() # gc clean", "source commitment idx removes ordering correlations about which client sent", "< time_best or size > size_best: time_best = ft tier_best", "None, announcehost = None): super().__init__(name=\"FusionController\") self.network = network self.tier =", "# How many clients can share same tag on a", "covert_domain = annhost_b, covert_port = annport, covert_ssl = False, server_time", ":{'(' if missing_sigs else ')'}\") # mark all missing-signature components", "'checking finished' so that if all blames are checked, we", "a lot of logs noisy = False # How long", "Before start of covert components phase, call start_components. - To", "This is fine # since it might be a resubmission", "tag: this IP cannot be present in too many fuses.", "different sources, with a deadline. 
def __init__(self, num_results, done_on_fail =", "if len(self.clients) < 2: # Sanity check for testing --", "= covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime > 0,", "return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is", "send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=5)", "c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del c.blinds, c.blind_sig_requests del results,", "(announcing as: {annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain", "bad_components = set() ### if skip_signatures: self.print_error(\"skipping covert signature acceptance\")", "= client.recv('myproofslist') seed = msg.random_number if sha256(seed) != client.random_number_commitment: client.error(\"seed", "pool. msg = client.recv('joinpools', timeout=120) if len(msg.tiers) == 0: client.error(\"No", "# further, search for duplicated inputs (through matching the prevout", "unlimited access client.tags = [] else: # Default tag: this", "< Params.min_safe_clients: for c in live: c.kill(\"too few remaining live", "than one # signed, then it's malicious behaviour! 
if sum((signatures[i]", "ended up client_commit_indexes = [[None]*Params.num_components for _ in self.clients] for", "+ Params.start_time_spacing) self.tier_best = tier_best def start_fuse(self, tier): \"\"\" Immediately", "# Upload the full commitment list; we're a bit generous", "Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers = Params.tiers,", "covert_server.start() self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time", "self.reset_timer() inftime = float('inf') while True: with self.lock: if self.stopping", "except AttributeError: client.error('signature submitted at wrong time') client.send_ok() client.got_submit =", "dest_client_idx, dest_key_idx)) if not collector.add((client, relays)): client.error(\"late proofs\") for client", "self.print_error(f\"ending covert component acceptance. {len(component_master_list)} received.\") # Sort the components", "are waiting due to tags being full self.tags = defaultdict(TagStatus)", "= True): self.num_results = int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev =", "def send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,):", "submsg, timeout=timeout) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def", "proof\") # they should only be 129 bytes long. #", "bad input but it was fine ({outpoint})\") # At this", "[None]*num_inputs self.sighashes = sighashes self.pubkeys = pubkeys for c in", "self.print_error(f\"startround sent at {time.time()}; accepting covert components\") # Await commitment", "+ Protocol.STANDARD_TIMEOUT) # Now, repackage the proofs according to destination.", "for comp, (sort_key, contrib) in component_master_list] del component_master_list # Do", "self.<variables> that may change. 
for blame in msg.blames: try: encproof,", "tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash)) # Sleep", "= [c for c in self.clients if not c.dead] self.check_client_count()", "= msg.encrypted_proofs if len(proofs) != Params.num_components: client.error(\"wrong number of proofs\")", "in self.clients: c.addjob(clientjob_goodbye, 'internal server error') finally: covert_server.stop() for c", "such as nginx for that purpose. \"\"\" import secrets import", "please switch servers\") else: client.print_error(\"👀 No genesis hash declared by", "a waiting pool for a specific tier \"\"\" def __init__(self,", "try: del self.round_pubkey del self.components del self.feerate except AttributeError: pass", "did pool exceed fill_threshold self.tag_max = tag_max # how many", "commit, ci, cj in commitment_master_list if ci != myindex] N", "self.sendall(pb.FusionResult(ok = True, txsignatures = signatures)) return True self.sendall(pb.FusionResult(ok =", "or ft < time_best or size > size_best: time_best =", "the client declares the genesis_hash, we # do indeed disallow", "not reference self.<variables> that may change. for blame in msg.blames:", "of signatures (which will have None at positions of missing", "self.lock: try: self.components[msg.component] = (sort_key, contrib) except AttributeError: client.error('component submitted", "# We got a second submission before a new phase", "doesn't even make sense with one player. for c in", "Params.tiers} self.t_last_fuse = time.monotonic() # when the last fuse happened;", "want before starting a fusion? min_clients = 8 # If", "self.tags = defaultdict(TagStatus) # how are the various tags self.fill_threshold", "covert_server): covert_priv, covert_Upub, covert_Cpub = gen_keypair() round_pubkey = covert_Cpub #", "but we don't allow it to consume our CPU power.", "clients who didn't manage to give a good commitment. 
prev_client_count", "{src_commitment_idx}): {ret}') continue if src_client.dead: # If the blamed client", "= 400 # whether to print a lot of logs", "submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg def", "finally: covert_server.stop() for c in self.clients: c.addjob(clientjob_goodbye, None) self.clients =", "in self.clients: scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds,", "for tag in msg.tags: if len(tag.id) > 20: client.error(\"Tag id", "threading.Lock() seen_salthashes = set() # Send start message to players;", "round_time, all_commitments, all_components) #TODO : Check the inputs and outputs", "remaining live players\") raise FusionError(\"too few remaining live players\") def", "for duplicated inputs (through matching the prevout and claimed pubkey).", "as an absolute minimum (for privacy)? min_safe_clients = 6 #", "the end of covert signatures phase, owner calls end_signatures, which", "if it has only been above min_clients for a short", "more than one pool, we don't have bias towards any", "= calc_initial_hash(self.tier, annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run", ">= self.tag_max: return \"too many clients with same tag\" def", "= Protocol.STANDARD_TIMEOUT): for client in self.clients: client.addjob(clientjob_send, msg, timeout) def", "for non-cryptographic purposes import random rng = random.Random() rng.seed(secrets.token_bytes(32)) def", "s in E12] # How many clients do we want", "end of covert signatures phase, owner calls end_signatures, which returns", "list, then separate it out. 
component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp", "# Sanity check for testing -- the proof sharing thing", "= CovertServer(self.bindhost, upnp = self.upnp) try: annhost = covert_server.host if", "> 20: client.error(\"Tag id too long\") if not (0 <", "this round_time = round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail = False)", "bytes object to represent a pool tag \"\"\" __slots__ =", "pool.remove(client): pool.try_move_from_queue() if self.tier_best in mytierpools: # we left from", "def __init__(self, bindhost, port=0, upnp = None): super().__init__(bindhost, port, CovertClientThread,", "min_safe_clients * num_components >= 0xfc: # the smallest fusion could", "src_client.dead: # If the blamed client is already dead, don't", "if remtime > 0: time.sleep(remtime) # Upload the full commitment", "tx, input_indices = tx_from_components(all_components, session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache", "> 200 for p in proofs): client.error(\"too-long proof\") # they", "ValidationError as e: self.print_error(\"got bad blame; clamed reason was: \"+repr(blame.blame_reason))", "client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z, _ in", "assert network assert isinstance(donation_address, (Address, type(None))) if not schnorr.has_fast_sign() or", "Tor component submission? # - how long from one round's", "# signatures. 
This makes it slightly harder for one of", "submitted largest possible component (uncompressed p2pkh input), how many could", "client.error(\"late proofs\") for client in self.clients: client.addjob(client_get_proofs, collector) results =", "self.print_error(f\"Ended with error: {e}\") except Exception as e: self.print_error('Failed with", "self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer): \"\"\" Server for", "round_time )) msg = c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee,", "is full). start_time_spacing = 120 # But don't start a", "add(self, client): can_pool = True for t in client.tags: ts", "to start a fusion. New clients get a ClientThread made", "inftime = float('inf') while True: with self.lock: if self.stopping or", "in_pool = False try: self.queue.remove(client) except ValueError: return False else:", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "reference self.<variables> that may change. for blame in msg.blames: try:", "a good commitment. prev_client_count = len(self.clients) self.clients = [c for", "min_safe_clients - 1) // min_safe_clients # How many clients can", "schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad transaction signature') if existing_sig: #", "missing-signature components as bad. bad_inputs = set(i for i,sig in", "phase') if mtype == 'component': try: round_pubkey = self.round_pubkey feerate", "smallest fusion # (these overhead numbers assume op_return script size", "= e.server_msg self.print_error(f\"could not broadcast the transaction! {nice_msg}\") except TimeoutException:", "pool for a specific tier \"\"\" def __init__(self, fill_threshold, tag_max):", "clients to choose their pool. 
msg = client.recv('joinpools', timeout=120) if", "= False while True: msg, mtype = client.recv('component', 'signature', 'ping',", "= bytes([maxsimul, len(ipb)]) + ipb + tagbytes return super().__new__(cls, b)", "- 1) // min_safe_clients # How many clients can share", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "for i in spenders) != 1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i]", "= min(remtime, self.tier_best_starttime - tnow) if remtime <= 0: self.start_fuse(t)", "= [1.0, 1.5, 2.2, 3.3, 4.7, 6.8] E12 = [1.0,", "max_excess_fee = 300000 # sats tiers = [round(b*s) for b", "of logs noisy = False # How long covert connections", "Choose the minimum excess fee based on dividing the overhead", "clients with same tag\" def _add_pool(self, client): self.pool.add(client) for t", "all_commitments, all_components) #TODO : Check the inputs and outputs to", "commitment list; we're a bit generous with the timeout but", "e.args server_msg = e.server_msg self.print_error(f\"could not broadcast the transaction! {nice_msg}\")", "at wrong time') sort_key, contrib = check_covert_component(msg, round_pubkey, feerate) with", "= [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True)))) for i in range(len(tx.inputs()))]", "so start immediately self.start_fuse(t) return # we have added to", "round if started at this tier self.queue = list() #", "small head start in relaying, before sharing the # signatures.", "in client.tags: ts = self.tags[t] ts.pool += 1 if len(self.pool)", "the inputs is signed, we don't punish him # because", "= client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if not collector.add((client, relays)):", "# not reference self.<variables> that may change. 
for blame in", "restriction, # including without limitation the rights to use, copy,", "import traceback from collections import defaultdict import oregano.schnorr as schnorr", "client.tags: ts = self.tags[t] ts.all_ -= 1 if in_pool: ts.pool", "randomly chosen destinations, same way as client did. relays =", "which client sent which proof proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs", "if len(pool.pool) >= Params.max_clients: # pool filled up to the", "give a good commitment. prev_client_count = len(self.clients) self.clients = [c", "fusion will use 1-byte varint for both inputs and outputs", "clients get a ClientThread made for them, and they are", "ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol import Protocol from .util import", "NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR", "transaction! \" + txid) try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError as", "note where each client's commitments ended up client_commit_indexes = [[None]*Params.num_components", "wait this long before starting the next one (unless hit", "assert N == len(all_commitments) - Params.num_components # calculate the randomly", "to stay open without activity. # note this needs to", "results results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter", "to last possible Tor component submission? # - how long", "self.clients: c.addjob(clientjob_goodbye, 'internal server error') finally: covert_server.stop() for c in", "= None): super().__init__(bindhost, port, CovertClientThread, upnp = upnp) self.round_pubkey =", "[] def __enter__(self, ): return self def __exit__(self, exc_type, exc_value,", "= False try: self.queue.remove(client) except ValueError: return False else: in_pool", "already dead, don't waste more time. 
# Since nothing after", "= [] for client in self.queue: for t in client.tags:", "for src_client, relays in results: for proof, src_commitment_idx, dest_client_idx, dest_key_idx", "return True def try_move_from_queue(self): # attempt to move clients from", "== len(pubkeys) self.signatures = [None]*num_inputs self.sighashes = sighashes self.pubkeys =", "message: {e} (you claimed: {blame.blame_reason!r})') continue if isinstance(ret, str): self.print_error(f\"verified", "submitted at wrong time') else: assert mtype == 'signature' try:", "both inputs and outputs lists overhead = 62 elif min_safe_clients", "msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}')", "tier self.reset_timer() inftime = float('inf') while True: with self.lock: if", "transaction! {nice_msg}\") except TimeoutException: self.print_error(\"timed out while trying to broadcast", "submission? # - how long from one round's component submission", "= list(clients) self.bindhost = bindhost self.upnp = upnp self.announcehost =", "0: # cleanup for no-longer-used tags del self.tags[t] return True", "a result. # Note that we could have aborted earlier", "after ack failed delivery, # but we don't allow it", "components: {bad_components}\") if len(self.clients) < 2: # Sanity check for", "waiting pools. Once a Fusion thread is started, the ClientThreads", "= start_ev if client_ip.startswith('127.'): # localhost is whitelisted to allow", "already have this signature. 
This is fine # since it", "Protocol.BLAME_VERIFY_TIME) # More than one blame per proof is malicious.", "proof in enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx", "self.daemon = True def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT): for", "if not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") # record for", "= [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z, _ in proofs]))", "if not schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\")", "= bindhost self.upnp = upnp self.announcehost = announcehost self.daemon =", "= None, announcehost = None, donation_address = None): assert network", "= False, server_time = begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b, annport,", "self.clients = list(clients) self.bindhost = bindhost self.upnp = upnp self.announcehost", "self.t_last_fuse = time.monotonic() # when the last fuse happened; as", "= Params.noisy covert_server.start() self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as:", "repackage the proofs according to destination. proofs_to_relay = [list() for", "support, however a server admin may run an SSL server", "Boot client # immediately since client may be trying to", "self.done_on_fail: self.done_ev.set() elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results:", "finally: self.waiting_pools.clear() # gc clean def reset_timer(self, ): \"\"\" Scan", "assert mtype == 'signature' try: sighash = self.sighashes[msg.which_input] pubkey =", "remtime > 0: time.sleep(remtime) # Upload the full commitment list;", "sharing the # signatures. 
This makes it slightly harder for", "else: assert mtype == 'signature' try: sighash = self.sighashes[msg.which_input] pubkey", "at wrong time') except IndexError: raise ValidationError('which_input too high') sig", "0xfc: # the smallest fusion could require 3-byte varint for", "[schnorr.BlindSigner() for _co in range(Params.num_components)] lock = threading.Lock() seen_salthashes =", "set() # Send start message to players; record the time", "round_pubkey = self.round_pubkey feerate = self.feerate _ = self.components except", "optional and we tolerate it # missing. However, if the", "recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol import Protocol from .util", "AttributeError: pass def new_client_job(self, client): client.got_submit = False while True:", "= all_components, session_hash = session_hash)) # Sleep until end of", "check_input_electrumx(self.network, ret) except ValidationError as e: reason = f'{e.args[0]} ({outpoint})'", "= self.clients.index(client) possible_commitment_destinations = [(ci,cj) for commit, ci, cj in", "< 2: # Sanity check for testing -- the proof", "collect statuses, also check start times. statuses = dict() tfill_thresh", "privacy leak by the ommission. 
continue assert ret, 'expecting input", "with exception!') traceback.print_exc(file=sys.stderr) for c in self.clients: c.addjob(clientjob_goodbye, 'internal server", "FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE", "covert_server.host if self.announcehost is None else self.announcehost annhost_b = annhost.encode('ascii')", "// min_safe_clients # How many clients can share same tag", "__new__(cls, ipstr, tagbytes, maxsimul): ipb = ipstr.encode() b = bytes([maxsimul,", "self.stopping: return client.error(f\"Invalid tier selected: {t}\") try: mytiers = list(mytierpools)", "submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def", "run rounds until successful or exception while True: covert_server.reset() #", "private key. If more than one # signed, then it's", "our covert timeline. covert_T0 = time.monotonic() self.print_error(f\"startround sent at {time.time()};", "bad proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}')", "free of charge, to any person # obtaining a copy", "def run (self, ): self.print_error(f'Starting fusion with {len(self.clients)} players at", "covert signatures phase, owner calls end_signatures, which returns a list", "self.waiting_pools.items(): ft = pool.fill_time if ft is None: continue size", "zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del c.blinds, c.blind_sig_requests del", "report back to the # verifier, there is no privacy", "if t == self.tier_best: # this is the favoured pool,", "9.1] # TODO - make these configurable class Params: num_components", "fee based on dividing the overhead amongst players, in the", "len(sig) != 64: raise ValidationError('signature length is wrong') # It", "a while if many # checks against blockchain need to", "tx_from_components(all_components, session_hash) sighashes = 
[sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True)))) for", "= collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who", "signatures = list(covert_server.end_signatures()) missing_sigs = len([s for s in signatures", "last fuse happened; as a placeholder, set this to startup", "def __init__(self, config, network, bindhost, port, upnp = None, announcehost", "{bad_components}\") if len(self.clients) < 2: # Sanity check for testing", "chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update timing info self.t_last_fuse = time.monotonic()", "< self.fill_threshold: self.fill_time = None for t in client.tags: ts", "None at positions of missing signatures). - To reset the", "is optional and we tolerate it # missing. However, if", "return len(chosen_clients) def new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0] msg =", "we'll let them slide...\") if self.stopping: return donation_address = ''", "key. If more than one # signed, then it's malicious", "commitment\") proofs = msg.encrypted_proofs if len(proofs) != Params.num_components: client.error(\"wrong number", "len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len: c.error('duplicate component commitment') if", "E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9,", "tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp = self.upnp) try: annhost =", "session_hash)) # Sleep until end of covert signatures phase remtime", "self.bindhost = bindhost self.upnp = upnp self.announcehost = announcehost self.daemon", "do we want before starting a fusion? 
min_clients = 8", "who will be put into fusion round if started at", "as e: self.print_error(f\"Ended with error: {e}\") except Exception as e:", "if not collector.add((client, relays)): client.error(\"late proofs\") for client in self.clients:", "= all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components,", "[bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash", "the player. # we aren't collecting any results, rather just", "OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT,", "import Protocol from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair,", "[c for c in self.clients if not c.dead] if len(live)", "time.monotonic() assert remtime > 0, \"timings set up incorrectly\" time.sleep(remtime)", "status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients) remtime =", "def maxsimul(self): return self[0] class TagStatus: __slots__ = ('pool', 'all_')", "pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self, msg):", "many players can they represent in the same fuse? ip_max_simul_fuse", "obtaining a copy of this software and associated documentation files", "this tier self.queue = list() # clients who are waiting", "open without activity. # note this needs to consider the", "def add(self, client): can_pool = True for t in client.tags:", "client.error(\"Tag id too long\") if not (0 < tag.limit <", "outputs lists overhead = 60 else: # the smallest fusion", "# scan the commitment list and note where each client's", "= Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers = Params.tiers, donation_address =", "consume our CPU power. 
if sig != existing_sig: if not", "> size_best: time_best = ft tier_best = t size_best =", "is None: self.tier_best_starttime = None else: self.tier_best_starttime = max(time_best +", "in live: c.kill(\"too few remaining live players\") raise FusionError(\"too few", "b in [10000, 100000, 1000000, 10000000, 100000000] for s in", "for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash =", "Params.component_feerate) # generate blind nonces (slow!) for c in self.clients:", "Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None):", "client.error('too many blames') if len(set(blame.which_proof for blame in msg.blames)) !=", "not (0 < tag.limit < 6): client.error(\"Tag limit out of", "client.tags = [] else: # Default tag: this IP cannot", "returns a list of signatures (which will have None at", "can_pool = False if can_pool: self._add_pool(client) else: self.queue.append(client) return can_pool", "Resistor \"E series\" values -- round numbers that are almost", "[1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6,", "start in relaying, before sharing the # signatures. This makes", "5 (lokad) + 33 (session hash) ) if min_safe_clients *", "exceed fill_threshold self.tag_max = tag_max # how many clients can", "launch Fusion at the selected tier. 
\"\"\" with self.lock: chosen_clients", "components\") # Await commitment messages then process results results =", "defaultdict(list) for i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout,", "since client may be trying to DoS us by #", "i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex() +", "ValidationError as e: reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad", "didn't have private key. If more than one # signed,", "{msg}') class ClientTag(bytes): \"\"\" enhanced bytes object to represent a", "myindex = self.clients.index(client) possible_commitment_destinations = [(ci,cj) for commit, ci, cj", "ts = self.tags[t] ts.all_ -= 1 if in_pool: ts.pool -=", "len(self.clients)})\") total_excess_fees = sum(f for _,_,f in results) # Generate", "c.got_submit = False def end_signatures(self): with self.lock: ret = self.signatures", "a resubmission after ack failed delivery, # but we don't", "clients to trigger setting fill_time self.fill_time = None # when", "for clients to choose their pool. msg = client.recv('joinpools', timeout=120)", "allowed to stay open without activity. # note this needs", "generate blind nonces (slow!) 
for c in self.clients: c.blinds =", "punish him # because he's the honest guy and all", "the # signing phase and go directly to blame, or", "component_master_list # Do some preliminary checks to see whether we", "rest of this function might run for a while if", "collecting any results, rather just marking that # 'checking finished'", "exc_value, traceback): if exc_type is not None: self.fails.append(exc_value) if self.done_on_fail:", "Clean up dead clients self.clients = [c for c in", "ipb + tagbytes return super().__new__(cls, b) @property def maxsimul(self): return", "msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash(): # For now, msg.genesis_hash is", "then separate it out. component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp for", "result): with self.lock: try: self.results.append(result) except AttributeError: return False else:", "should just skip the # signing phase and go directly", "start_components. - To signal the end of covert components phase,", "Before start of covert signatures phase, owner calls start_signatures. -", "appropriate to add some 'ban score' to the player. #", "no privacy leak by the ommission. continue assert ret, 'expecting", "are doing this. remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic()", "False) def client_start(c, collector): with collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points", "= [round(b*s) for b in [10000, 100000, 1000000, 10000000, 100000000]", "many tags\") # Event for signalling us that a pool", "to consume our CPU power. 
if sig != existing_sig: if", "always be unique) \"\"\" with self.lock: time_best = None tier_best", "= [schnorr.BlindSigner() for _co in range(Params.num_components)] lock = threading.Lock() seen_salthashes", "proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z)", "fusion if it has only been above min_clients for a", "i in bad_inputs) else: for i, (inp, sig) in enumerate(zip(tx.inputs(),", "submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5)", "whether to print a lot of logs noisy = False", "client.error(\"This server is on a different chain, please switch servers\")", "testnet # and we are mainnet, etc. client.error(\"This server is", "in self.clients: c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)] lock", "tagbytes return super().__new__(cls, b) @property def maxsimul(self): return self[0] class", "commitment_master_list = [(commit, ci, cj) for ci, (_, commitments, _)", "the favoured tier self.reset_timer() inftime = float('inf') while True: with", "upnp = upnp) self.config = config self.network = network self.announcehost", "= False, bad_components = sorted(bad_components))) ### self.print_error(f\"entering blame phase. 
bad", "msg = client.recv('clienthello') if msg.version != Protocol.VERSION: client.error(\"Mismatched protocol version,", "Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best = tier_best def start_fuse(self, tier):", "c in self.clients if not c.dead] self.check_client_count() if self.run_round(covert_server): break", "makes it slightly harder for one of the players to", "\"timings set up incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert", ".protocol import Protocol from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash,", "AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "else: if len(self.fails) + len(self.results) >= self.num_results: self.done_ev.set() return True", "self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end of covert", "waiting pool for a specific tier \"\"\" def __init__(self, fill_threshold,", "by source commitment idx removes ordering correlations about which client", "len(msg.blames) > len(proofs): client.error('too many blames') if len(set(blame.which_proof for blame", "- If no pools are filled then there is no", "type(None))) if not schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires", "100000000] for s in E12] # How many clients do", "- time.monotonic() if remtime > 0: time.sleep(remtime) # Upload the", "_ in self.clients] for i, (commit, ci, cj) in enumerate(commitment_master_list):", "len(possible_commitment_destinations) assert N == len(all_commitments) - Params.num_components # calculate the", "self.config = config self.network = network self.announcehost = announcehost self.donation_address", "point can report back to the # verifier, there is", "any person # obtaining a copy of this software and", "announcehost self.daemon = True def sendall(self, msg, timeout = 
Protocol.STANDARD_TIMEOUT):", "spenders in prevout_spenders.items(): if len(spenders) == 1: continue self.print_error(f\"multi-spend of", "!= Params.num_components: client.error(\"wrong number of proofs\") if any(len(p) > 200", "feerate self.round_pubkey = round_pubkey for c in self.spawned_clients: c.got_submit =", "and ts.all_ >= self.tag_max: return \"too many clients with same", "start next round right away. collector.add(None) for idx, (client, proofs)", "= True)) else: self.print_error(\"starting covert signature acceptance\") tx, input_indices =", "pool and queue) def check_add(self, client): for t in client.tags:", "False if can_pool: self._add_pool(client) else: self.queue.append(client) return can_pool def remove(self,", "statuses)) start_ev.wait(2) except: # Remove client from waiting pools on", "if len(seen_salthashes) != expected_len: c.error('duplicate component commitment') if not collector.add((c,", "default, will bind to an ephemeral port. - Before start", "is not None: client.error(res) for t in mytiers: pool =", "timing of a given input's signature submission. raise ValidationError('conflicting valid", "Immediately launch Fusion at the selected tier. \"\"\" with self.lock:", "self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})\")", "the smallest fusion could require 3-byte varint for both inputs", "subjective. 
It would be # appropriate to add some 'ban", "client in self.clients: client.addjob(client_get_proofs, collector) results = collector.gather(deadline = time.monotonic()", "mytierpools = {t: self.waiting_pools[t] for t in msg.tiers} except KeyError:", "component_feerate = 1000 # sats/kB max_excess_fee = 300000 # sats", "self.done_on_fail = bool(done_on_fail) self.done_ev = threading.Event() self.lock = threading.Lock() self.results", "self.announcehost) fusion.start() return len(chosen_clients) def new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0]", "are already removed; on stop we don't care.) with self.lock:", "= network self.tier = tier self.clients = list(clients) self.bindhost =", "c.random_number_commitment = msg.random_number_commitment for client in self.clients: client.addjob(client_start, collector) #", "= {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers} self.t_last_fuse =", "= tx.txid() self.print_error(\"completed the transaction! \" + txid) try: self.network.broadcast_transaction2(tx,", "= possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx,", "message to players; this # will form the basis of", "-- the proof sharing thing doesn't even make sense with", "encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof] except IndexError: client.kill(f'bad proof", "verifier, there is no privacy leak by the ommission. continue", "client sent which proof proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs =", "= sig except AttributeError: client.error('signature submitted at wrong time') client.send_ok()", "skip_signatures = True self.print_error(\"problem detected: too few components submitted\") if", "except: # Remove client from waiting pools on failure (on", "then it's malicious behaviour! 
if sum((signatures[i] is not None) for", "bit before uploading commitments, as clients are doing this. remtime", "for this long. start_time_max = 1200 # Inter-fusion delay --", "+ Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime < 0: # really", "with the earliest fill time; - If no pools are", "= self.tags[t] if ts.pool >= t.maxsimul: break else: self._add_pool(client) moved.append(client)", "who didn't have private key. If more than one #", "in msg.tiers} except KeyError: if self.stopping: return client.error(f\"Invalid tier selected:", "exc_type is not None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif len(self.fails)", "inp['signatures'][0] = sig.hex() + '41' assert tx.is_complete() txid = tx.txid()", "tx.txid() self.print_error(\"completed the transaction! \" + txid) try: self.network.broadcast_transaction2(tx, timeout=3)", "signature. This is fine # since it might be a", "None): baddies = set(self.clients).difference(goodclients) for c in baddies: c.kill(reason) def", "else: # Default tag: this IP cannot be present in", "remtime)) with self.lock: ret = self.results del self.results return ret", "lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z,", "nonces (slow!) for c in self.clients: c.blinds = [schnorr.BlindSigner() for", "= list(covert_server.end_signatures()) missing_sigs = len([s for s in signatures if", "before uploading commitments, as clients are doing this. remtime =", "self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time =", "input's signature submission. 
raise ValidationError('conflicting valid signature') with self.lock: try:", "+ Protocol.T_START_COMPS - time.monotonic() if remtime > 0: time.sleep(remtime) #", "myindex, proofs, collector): with collector: # an in-place sort by", "port, upnp = None, announcehost = None, donation_address = None):", "self.fill_threshold: self.fill_time = None for t in client.tags: ts =", "but remember exactly where each commitment originated. commitment_master_list = [(commit,", "encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate) except ValidationError as e:", "= int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev = threading.Event() self.lock =", "calc_initial_hash(self.tier, annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds", "True self.print_error(\"problem detected: excess fee mismatch\") self.last_hash = session_hash =", "them slide...\") if self.stopping: return donation_address = '' if isinstance(self.donation_address,", "e: self.print_error(\"got bad blame; clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame", "= None else: self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse +", "= len(sighashes) assert num_inputs == len(pubkeys) self.signatures = [None]*num_inputs self.sighashes", "for covert submissions. 
How it works: - Launch the server", "None size_best = 0 for t, pool in self.waiting_pools.items(): ft", "set(m.salted_component_hash for m in commit_messages) with lock: expected_len = len(seen_salthashes)", "client.tags: ts = self.tags.get(t) if ts is not None and", "ts = self.tags[t] ts.pool += 1 if len(self.pool) == self.fill_threshold:", "defaultdict import oregano.schnorr as schnorr from oregano.address import Address from", "spenders) != 1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for i in", "assert ret, 'expecting input component' outpoint = ret.prev_txid[::-1].hex() + ':'", "per proof is malicious. Boot client # immediately since client", "run an SSL server proxy such as nginx for that", "unique) \"\"\" with self.lock: time_best = None tier_best = None", "clients list, so that the fusion can continue independently of", "at {time.time()}; accepting covert components\") # Await commitment messages then", "{repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated bad input but it was", "self.tier_best_starttime = None else: self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse", "- Launch the server at any time. 
By default, will", "# Default tag: this IP cannot be present in too", "pool tag \"\"\" __slots__ = () def __new__(cls, ipstr, tagbytes,", "or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port, ClientThread,", "run(self): try: super().run() finally: self.waiting_pools.clear() # gc clean def reset_timer(self,", "self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for clients waiting to start a", "chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost) fusion.start() return", "dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob,", "from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position)", "from different sources, with a deadline. def __init__(self, num_results, done_on_fail", "many inputs against blockchain. if len(msg.blames) > len(proofs): client.error('too many", "collector.add(None) for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx,", "a new round, call .reset(); to kill all connections, call", "transaction! misconfigured?\") # This probably indicates misconfiguration since fusion server", "- len(self.clients)})\") total_excess_fees = sum(f for _,_,f in results) #", "contrib) in component_master_list] component_contribs = [contrib for comp, (sort_key, contrib)", "# how many clients can share same tag (in pool", "random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout)", "pool.try_move_from_queue() # Update timing info self.t_last_fuse = time.monotonic() self.reset_timer() #", "to have a good connection to the EC server. Report", "max time or pool is full). 
start_time_spacing = 120 #", "component_feerate = Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers", "mytierpools.items(): if client not in pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players", "!= myindex] N = len(possible_commitment_destinations) assert N == len(all_commitments) -", "did. relays = [] for i, proof in enumerate(proofs): dest_client_idx,", "not collector.add((client, relays)): client.error(\"late proofs\") for client in self.clients: client.addjob(client_get_proofs,", "not None: client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread per", "granted, free of charge, to any person # obtaining a", "= sum(f for _,_,f in results) # Generate scrambled commitment", "min_safe_clients = 6 # Choose the minimum excess fee based", "signatures (which will have None at positions of missing signatures).", "copies of the Software, # and to permit persons to", "del self.tags[t] return True def try_move_from_queue(self): # attempt to move", "with {len(self.clients)} players at tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp =", "matching the prevout and claimed pubkey). prevout_spenders = defaultdict(list) for", "prevout_spenders.items(): if len(spenders) == 1: continue self.print_error(f\"multi-spend of f{prevout} detected\")", "failed delivery, # but we don't allow it to consume", "an SSL server proxy such as nginx for that purpose.", "need as an absolute minimum (for privacy)? min_safe_clients = 6", "upnp = self.upnp, announcehost = self.announcehost) fusion.start() return len(chosen_clients) def", "= donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in", "they are e.g. on testnet # and we are mainnet,", "is not None: # a non-favoured pool will start eventually", "malicious. 
Boot client # immediately since client may be trying", "del self.feerate except AttributeError: pass try: del self.sighashes del self.pubkeys", "contribs list, then separate it out. component_master_list.sort(key=lambda x:x[1][0]) all_components =", "only be 129 bytes long. # generate the possible destinations", "from . import fusion_pb2 as pb from .comms import send_pb,", "= Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More than one blame per", "commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame, encproof, src_commit_blob,", "now, msg.genesis_hash is optional and we tolerate it # missing.", "all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components,", "rng.shuffle(mytiers) # shuffle the adding order so that if filling", "min_players = Params.min_clients) remtime = inftime if pool.fill_time is not", "del self.components return ret def start_signatures(self, sighashes, pubkeys): num_inputs =", "even have reasonable # privacy with what we have. 
bad_components", "= set(self.clients).difference(goodclients) for c in baddies: c.kill(reason) def run_round(self, covert_server):", "# How long covert connections are allowed to stay open", "covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash)) # Sleep until", "the players to # broadcast a malleated version by re-signing", "if sig is None) # further, search for duplicated inputs", "super().run() finally: self.waiting_pools.clear() # gc clean def reset_timer(self, ): \"\"\"", "add(self, result): with self.lock: try: self.results.append(result) except AttributeError: return False", "self.announcehost annhost_b = annhost.encode('ascii') annport = covert_server.port covert_server.noisy = Params.noisy", "(uncompressed p2pkh input), how many could we take until the", "tiers = Params.tiers, donation_address = donation_address )) # We allow", "with self.lock: try: self.results.append(result) except AttributeError: return False else: if", "lightweight Ergon client # CashFusion - an advanced coin anonymizer", "\"\"\" enhanced bytes object to represent a pool tag \"\"\"", "that are almost geometrically uniform E6 = [1.0, 1.5, 2.2,", "all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end of covert components phase", "if mtype == 'component': try: round_pubkey = self.round_pubkey feerate =", "'41' assert tx.is_complete() txid = tx.txid() self.print_error(\"completed the transaction! \"", "rng.shuffle(commitment_master_list) all_commitments = tuple(commit for commit,ci,cj in commitment_master_list) # Send", "submission before a new phase started. 
As # an anti-spam", "Exception as e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for c in", "in bad_inputs) else: for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)):", "from one round's component submission to the next round's component", "IndexError: client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}') continue src_commit_blob, src_commit_client_idx,", "in results] self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)} clients (dropped {prev_client_count", "started, the ClientThreads are passed over to a FusionController to", "the originating client's commitments). myindex = self.clients.index(client) possible_commitment_destinations = [(ci,cj)", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "for client in self.queue: for t in client.tags: ts =", "excess fee based on dividing the overhead amongst players, in", "list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance. {len(component_master_list)} received.\") # Sort the", "covert_priv, covert_Upub, covert_Cpub = gen_keypair() round_pubkey = covert_Cpub # start", "c.kill('blame yourself!') return # scan the commitment list and note", "= collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now, repackage the", "proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}') continue", "except TimeoutException: self.print_error(\"timed out while trying to broadcast transaction! misconfigured?\")", "values -- round numbers that are almost geometrically uniform E6", "reset the server for a new round, call .reset(); to", "for a new round, call .reset(); to kill all connections,", "for c in self.clients: scalars = [b.sign(covert_priv, e) for b,e", "the # signatures. 
This makes it slightly harder for one", "self.tag_max: return \"too many clients with same tag\" def _add_pool(self,", "prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders in prevout_spenders.items(): if len(spenders) ==", "would be # appropriate to add some 'ban score' to", "\"\"\" Server for covert submissions. How it works: - Launch", "to: Remove from spawned clients list, so that the fusion", "- how long from first connection to last possible Tor", "we # do indeed disallow them connecting if they are", "To signal the end of covert signatures phase, owner calls", "don't allow it to consume our CPU power. if sig", "with exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated bad input but", "= set() # clients who will be put into fusion", "they should only be 129 bytes long. # generate the", "= None for t in client.tags: ts = self.tags[t] ts.all_", "in chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update timing info self.t_last_fuse =", "of f{prevout} detected\") # If exactly one of the inputs", "covert_Upub, covert_Cpub = gen_keypair() round_pubkey = covert_Cpub # start to", "# We received a distinct valid signature. This is not", "long. # generate the possible destinations list (all commitments, but", "this signature. 
This is fine # since it might be", "[] for client in self.queue: for t in client.tags: ts", "sats tiers = [round(b*s) for b in [10000, 100000, 1000000,", "live players\") raise FusionError(\"too few remaining live players\") def run", "1 if len(self.pool) == self.fill_threshold: self.fill_time = time.monotonic() def add(self,", "sha256(seed) != client.random_number_commitment: client.error(\"seed did not match commitment\") proofs =", "= self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components, component_feerate = Params.component_feerate, min_excess_fee", "= Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client, text): # a gentler", "(since fill time is a float, this will almost always", "is whitelisted to allow unlimited access client.tags = [] else:", "e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for c in self.clients: c.addjob(clientjob_goodbye,", "ServerError as e: nice_msg, = e.args server_msg = e.server_msg self.print_error(f\"could", "done, perhaps even still # running after run_round has exited.", "to the maximum size, so start immediately self.start_fuse(t) return #", "= tier self.clients = list(clients) self.bindhost = bindhost self.upnp =", "for b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del", "commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash for", "have bias towards any particular tier with self.lock: if self.stopping:", "prevout, spenders in prevout_spenders.items(): if len(spenders) == 1: continue self.print_error(f\"multi-spend", "amount - fee). 
- Before start of covert signatures phase,", "blame in msg.blames)) != len(msg.blames): client.error('multiple blames point to same", "donation_address )) # We allow a long timeout for clients", "proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector) _ = collector.gather(deadline = time.monotonic()", "thing doesn't even make sense with one player. for c", "# Every round, clients leave ... How many clients do", "check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash for m in", "if filling more than one pool, we don't have bias", "# cleanup for no-longer-used tags del self.tags[t] return True def", "basis of our covert timeline. covert_T0 = time.monotonic() self.print_error(f\"startround sent", "with same tag\" def _add_pool(self, client): self.pool.add(client) for t in", "None and ts.all_ >= self.tag_max: return \"too many clients with", "done_on_fail = False) def client_get_blames(client, myindex, proofs, collector): with collector:", "may be trying to DoS us by # making us", "but leaving out the originating client's commitments). myindex = self.clients.index(client)", "We received a distinct valid signature. This is not #", "# Remove client from waiting pools on failure (on success,", "him # because he's the honest guy and all the", "client_start(c, collector): with collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points = [b.get_R()", "for a while if many # checks against blockchain need", "was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})') continue", "self.spawned_clients: c.got_submit = False def end_signatures(self): with self.lock: ret =", "before a new phase started. As # an anti-spam measure", "that the fusion can continue independently of waiting server. 
#", "sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True)))) for i in", "rounds.\"\"\" def __init__(self, config, network, bindhost, port, upnp = None,", "pool with the earliest fill time; - If no pools", "minimum excess fee based on dividing the overhead amongst players,", "= False def end_signatures(self): with self.lock: ret = self.signatures del", "# publish, distribute, sublicense, and/or sell copies of the Software,", "time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures = signatures)) return True self.sendall(pb.FusionResult(ok", "time') except IndexError: raise ValidationError('which_input too high') sig = msg.txsignature", "the pool with the earliest fill time; - If no", "= [(commit, ci, cj) for ci, (_, commitments, _) in", "return # add this client to waiting pools for pool", "for c in self.clients: c.kill('blame yourself!') return # scan the", "yourself!') return # scan the commitment list and note where", "works: - Launch the server at any time. By default,", "annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds until successful", "# broadcast a malleated version by re-signing one of their", "we break the connection as a result. 
# Note that", "as: {annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain =", "# Sleep a bit before uploading commitments, as clients are", "if src_client.dead: # If the blamed client is already dead,", "record the time we did this round_time = round(time.time()) collector", "# Since nothing after this point can report back to", "- Params.num_components # calculate the randomly chosen destinations, same way", "*expected_msg_names, timeout=None): submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return", "server is on a different chain, please switch servers\") else:", "= self.announcehost) fusion.start() return len(chosen_clients) def new_client_job(self, client): client_ip =", "tier_best def start_fuse(self, tier): \"\"\" Immediately launch Fusion at the", "are e.g. on testnet # and we are mainnet, etc.", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR", "proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients = len(results) collector = ResultsCollector(live_clients,", "in self.clients: client.addjob(client_start, collector) # Record the time that we", "fusion.start() return len(chosen_clients) def new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0] msg", "dict() self.feerate = feerate self.round_pubkey = round_pubkey for c in", "deal in the Software without restriction, # including without limitation", "# the smallest fusion could require 3-byte varint for both", "do so, # subject to the following conditions: # #", "client.addjob(client_get_proofs, collector) results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT) #", "leave ... 
How many clients do we need as an", "can't abuse us to find out the # timing of", "msg.random_number if sha256(seed) != client.random_number_commitment: client.error(\"seed did not match commitment\")", "perhaps even still # running after run_round has exited. For", "tag \"\"\" __slots__ = () def __new__(cls, ipstr, tagbytes, maxsimul):", "# as an 'internal server error'. raise else: self.print_error(\"broadcast was", "success, we are already removed; on stop we don't care.)", "pool is full). start_time_spacing = 120 # But don't start", "e: nice_msg, = e.args server_msg = e.server_msg self.print_error(f\"could not broadcast", "c.blinds, c.blind_sig_requests del results, collector # Sleep a bit before", "that # 'checking finished' so that if all blames are", "phase. bad components: {bad_components}\") if len(self.clients) < 2: # Sanity", "msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More", "{reason}\") src_client.kill('you provided a bad input: ' + reason) continue", "so that if all blames are checked, we # can", "0 self.all_ = 0 class WaitingPool: \"\"\" a waiting pool", "for c in live: c.kill(\"too few remaining live players\") raise", "SSL support, however a server admin may run an SSL", "\"\"\" __slots__ = () def __new__(cls, ipstr, tagbytes, maxsimul): ipb", "run the rounds.\"\"\" def __init__(self, config, network, bindhost, port, upnp", "6): client.error(\"Tag limit out of range\") ip = '' if", "blame; clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e} (you", "allow a long timeout for clients to choose their pool.", "non-favoured pool will start eventually remtime = pool.fill_time - tfill_thresh", "run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub = gen_keypair() round_pubkey = covert_Cpub", "- tfill_thresh if t == self.tier_best: # this is the", "self.all_ = 0 class WaitingPool: \"\"\" a waiting pool for", "at a special time remtime = min(remtime, 
self.tier_best_starttime - tnow)", "way as client did. relays = [] for i, proof", "timing info self.t_last_fuse = time.monotonic() self.reset_timer() # Uncomment the following", "[ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag in msg.tags: if len(tag.id) >", "a given IP, how many players can they represent in", "# a non-favoured pool will start eventually remtime = pool.fill_time", "as pb from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash", "time we did this round_time = round(time.time()) collector = ResultsCollector(len(self.clients),", "def new_client_job(self, client): client.got_submit = False while True: msg, mtype", "= (overhead + min_safe_clients - 1) // min_safe_clients # How", "signalling us that a pool started. start_ev = threading.Event() client.start_ev", "overhead = 60 else: # the smallest fusion will use", "FusionError(\"way too slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs = len([s", "is no privacy leak by the ommission. continue assert ret,", "[sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True)))) for i in range(len(tx.inputs()))] pubkeys", "({outpoint})\") # At this point we could blame the originator,", "= defaultdict(TagStatus) # how are the various tags self.fill_threshold =", "protocol version, please upgrade\") if msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash():", "TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR", "signing phase and go directly to blame, or maybe even", "if len(proofs) != Params.num_components: client.error(\"wrong number of proofs\") if any(len(p)", "from server side. 
\"\"\" def __init__(self, network, tier, clients, bindhost,", "not c.dead] if len(live) < Params.min_safe_clients: for c in live:", "self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash)) # Sleep until end", "client.addjob(client_get_blames, idx, proofs, collector) _ = collector.gather(deadline = time.monotonic() +", "received a distinct valid signature. This is not # allowed", "relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients = len(results) collector =", "time. # Since nothing after this point can report back", "the result would exceed 100 kB standard tx size limitation?", "+ 5 (lokad) + 33 (session hash) ) if min_safe_clients", "= False if can_pool: self._add_pool(client) else: self.queue.append(client) return can_pool def", "for c in baddies: c.kill(reason) def run_round(self, covert_server): covert_priv, covert_Upub,", "OK. self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end of", "have None at positions of missing signatures). - To reset", "src_client = proofs[blame.which_proof] except IndexError: client.kill(f'bad proof index {blame.which_proof} /", "self.sighashes = sighashes self.pubkeys = pubkeys for c in self.spawned_clients:", "pool, can start at a special time remtime = min(remtime,", "enumerate(signatures) if sig is None) # further, search for duplicated", "__init__(self, fill_threshold, tag_max): self.pool = set() # clients who will", "manage to give a good commitment. 
prev_client_count = len(self.clients) self.clients", "client_get_blames(client, myindex, proofs, collector): with collector: # an in-place sort", "self.tier, covert_domain = annhost_b, covert_port = annport, covert_ssl = False,", "return client.error(f\"Invalid tier selected: {t}\") try: mytiers = list(mytierpools) rng.shuffle(mytiers)", "t, pool in mytierpools.items(): if client not in pool.pool: continue", "min_excess_fee = (overhead + min_safe_clients - 1) // min_safe_clients #", "msg.random_number_commitment for client in self.clients: client.addjob(client_start, collector) # Record the", "None def start_components(self, round_pubkey, feerate): self.components = dict() self.feerate =", "6.8, 8.2] E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6,", "self.clients: scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)]", "class ClientTag(bytes): \"\"\" enhanced bytes object to represent a pool", "3-byte varint for either inputs or outputs lists overhead =", "= pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients) remtime = inftime", "= 100 # For a given IP, how many players", "could have aborted earlier but this # way third parties", "None, announcehost = None, donation_address = None): assert network assert", "sources, with a deadline. def __init__(self, num_results, done_on_fail = True):", "ClientThread, upnp = upnp) self.config = config self.network = network", "before sharing the # signatures. This makes it slightly harder", "{len(self.clients)} players at tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp = self.upnp)", "2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2] E24 = [1.0,", "be 129 bytes long. # generate the possible destinations list", "an ephemeral port. 
- Before start of covert components phase,", "sighash = self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except", "tagbytes, maxsimul): ipb = ipstr.encode() b = bytes([maxsimul, len(ipb)]) +", "OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT", "since it might be a resubmission after ack failed delivery,", "by re-signing one of their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok = True,", "def send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self, msg): self.send(pb.Error(message = msg),", "= msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for client in self.clients: client.addjob(client_start,", "Protocol.STANDARD_TIMEOUT) # Now, repackage the proofs according to destination. proofs_to_relay", "may change. for blame in msg.blames: try: encproof, src_commitment_idx, dest_key_idx,", "check_covert_component, validate_blame, ValidationError, check_input_electrumx) # Resistor \"E series\" values --", "= True if len(self.pool) < self.fill_threshold: self.fill_time = None for", "phase and go directly to blame, or maybe even restart", "If more than one # signed, then it's malicious behaviour!", "len(tag.id) > 20: client.error(\"Tag id too long\") if not (0", "until successful or exception while True: covert_server.reset() # Clean up", "try to join, reject) max_tier_client_tags = 100 # For a", "\"\"\" def __init__(self, network, tier, clients, bindhost, upnp = None,", "!= expected_len: c.error('duplicate component commitment') if not collector.add((c, msg.initial_commitments, msg.excess_fee)):", "SSL server proxy such as nginx for that purpose. \"\"\"", "= self.tags.get(t) if ts is not None and ts.all_ >=", "for t in Params.tiers} self.t_last_fuse = time.monotonic() # when the", "blockchain. if len(msg.blames) > len(proofs): client.error('too many blames') if len(set(blame.which_proof", "that may change. 
for blame in msg.blames: try: encproof, src_commitment_idx,", "players; this # will form the basis of our covert", "for _ in self.clients] for i, (commit, ci, cj) in", "traceback.print_exc(file=sys.stderr) for c in self.clients: c.addjob(clientjob_goodbye, 'internal server error') finally:", "# For a given IP, how many players can they", "FusionError(\"too few remaining live players\") def run (self, ): self.print_error(f'Starting", "covert components\") # Await commitment messages then process results results", "be unique) \"\"\" with self.lock: time_best = None tier_best =", "= round_time )) msg = c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee,", "maybe even restart / end # without sharing components. skip_signatures", "len(self.pool) == self.fill_threshold: self.fill_time = time.monotonic() def add(self, client): can_pool", "to the EC server. Report this back to clients #", "_ = commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame,", "all blames are checked, we # can start next round", "self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except AttributeError: client.error('signature", "True def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT): for client in", "are the various tags self.fill_threshold = fill_threshold # minimum number", "expected_len: c.error('duplicate component commitment') if not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late", "claimed pubkey). prevout_spenders = defaultdict(list) for i, inp in enumerate(tx.inputs()):", "upnp) self.config = config self.network = network self.announcehost = announcehost", "RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp = upnp) self.config", "contrib}, where contrib is (+- amount - fee). 
- Before", "except ValidationError as e: self.print_error(\"got bad blame; clamed reason was:", "phase, owner calls end_signatures, which returns a list of signatures", "commitment') if not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") # record", "maxsimul): ipb = ipstr.encode() b = bytes([maxsimul, len(ipb)]) + ipb", "the transaction! \" + txid) try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError", "pool will start eventually remtime = pool.fill_time - tfill_thresh if", "an absolute minimum (for privacy)? min_safe_clients = 6 # Choose", "if sig != existing_sig: if not schnorr.verify(pubkey, sig, sighash): raise", "len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem detected: too few components submitted\")", "... How many clients do we need as an absolute", "calls start_signatures. - To signal the end of covert signatures", "to do so, # subject to the following conditions: #", "self.done_ev.wait(max(0., remtime)) with self.lock: ret = self.results del self.results return", "self.network.broadcast_transaction2(tx, timeout=3) except ServerError as e: nice_msg, = e.args server_msg", "the components & contribs list, then separate it out. component_master_list.sort(key=lambda", "= announcehost self.daemon = True def sendall(self, msg, timeout =", "del component_master_list # Do some preliminary checks to see whether", "collector # Sleep a bit before uploading commitments, as clients", "= len(self.clients) self.clients = [c for c, _, _ in", "bad. 
bad_inputs = set(i for i,sig in enumerate(signatures) if sig", "len(getattr(self, 'results', ())) >= self.num_results: self.done_ev.set() def gather(self, *, deadline):", "left from best pool, so it might not be best", "self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for c in self.clients: c.addjob(clientjob_goodbye, 'internal", "pool.try_move_from_queue() if self.tier_best in mytierpools: # we left from best", "By default, will bind to an ephemeral port. - Before", "many clients do we need as an absolute minimum (for", "cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector = ResultsCollector(len(self.clients), done_on_fail", "= f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\") src_client.kill('you provided", "# when the last fuse happened; as a placeholder, set", "DoS us by # making us check many inputs against", "and outputs lists overhead = 62 elif min_safe_clients * num_components", "for a short time (unless pool is full). start_time_min =", "not schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad transaction signature') if existing_sig:", "the Software without restriction, # including without limitation the rights", "allow it to consume our CPU power. if sig !=", "need to be done, perhaps even still # running after", "if self.stopping or start_ev.is_set(): return tnow = time.monotonic() # scan", "them connecting if they are e.g. on testnet # and", "signal the end of covert signatures phase, owner calls end_signatures,", "min(remtime, self.tier_best_starttime - tnow) if remtime <= 0: self.start_fuse(t) return", "parties can't abuse us to find out the # timing", "blames point to same proof') # Note, the rest of", "100000, 1000000, 10000000, 100000000] for s in E12] # How", "[] self.fails = [] def __enter__(self, ): return self def", "= self.tier, covert_domain = annhost_b, covert_port = annport, covert_ssl =", "fuse? 
ip_max_simul_fuse = 3 # Guaranteed time to launch a", "# Notify that we will start. for c in chosen_clients:", "an advanced coin anonymizer # # Copyright (C) 2020 <NAME>", "3.3, 4.7, 6.8] E12 = [1.0, 1.2, 1.5, 1.8, 2.2,", "all_components, bad_components, Params.component_feerate) except ValidationError as e: self.print_error(\"got bad blame;", "= 40 # used for non-cryptographic purposes import random rng", "server. Report this back to clients # as an 'internal", "covert submissions. How it works: - Launch the server at", "covert_server = CovertServer(self.bindhost, upnp = self.upnp) try: annhost = covert_server.host", "so, # subject to the following conditions: # # The", "= covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who didn't manage", "calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position) from .validation import (check_playercommit, check_covert_component,", "sys import threading import time import traceback from collections import", "to destination. proofs_to_relay = [list() for _ in self.clients] for", "len(live) < Params.min_safe_clients: for c in live: c.kill(\"too few remaining", "except ServerError as e: nice_msg, = e.args server_msg = e.server_msg", "only allow one submission per connection # per phase. client.error('multiple", "self.lock: if self.stopping: return # add this client to waiting", "False def end_signatures(self): with self.lock: ret = self.signatures del self.signatures", "len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames): client.error('multiple blames point", "True: with self.lock: if self.stopping or start_ev.is_set(): return tnow =", "second submission before a new phase started. 
As # an", "rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def", "exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated bad input but it", "# (these overhead numbers assume op_return script size of 1", "for pool in mytierpools.values(): res = pool.check_add(client) if res is", "favoured tier self.reset_timer() inftime = float('inf') while True: with self.lock:", "adding order so that if filling more than one pool,", "if isinstance(ret, str): self.print_error(f\"verified a bad proof (for {src_commitment_idx}): {ret}\")", "while trying to broadcast transaction! misconfigured?\") # This probably indicates", "accepting covert components\") # Await commitment messages then process results", "= sorted(bad_components))) ### self.print_error(f\"entering blame phase. bad components: {bad_components}\") if", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "[1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4,", "already removed; on stop we don't care.) with self.lock: for", "commitments ended up client_commit_indexes = [[None]*Params.num_components for _ in self.clients]", "us by # making us check many inputs against blockchain.", "len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results: self.done_ev.set() def gather(self,", "self.components = dict() self.feerate = feerate self.round_pubkey = round_pubkey for", "self.print_error(f\"verified a bad proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for", "AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM,", "= '' if tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try:", "waste more time. 
# Since nothing after this point can", "[10000, 100000, 1000000, 10000000, 100000000] for s in E12] #", "that we could have aborted earlier but this # way", "pool.fill_time is not None: # a non-favoured pool will start", "proof sharing thing doesn't even make sense with one player.", "= False) def client_start(c, collector): with collector: c.send(pb.StartRound(round_pubkey = round_pubkey,", "last possible Tor component submission? # - how long from", "= Params.min_clients) remtime = inftime if pool.fill_time is not None:", "ServerError, TimeoutException from . import fusion_pb2 as pb from .comms", "= gen_keypair() round_pubkey = covert_Cpub # start to accept covert", "b in c.blinds], server_time = round_time )) msg = c.recv('playercommit')", "64: raise ValidationError('signature length is wrong') # It might be", "is wrong') # It might be we already have this", "round_pubkey, round_time, all_commitments, all_components) #TODO : Check the inputs and", "min_safe_clients * num_components >= 2 * 0xfc: # the smallest", "if sum((signatures[i] is not None) for i in spenders) !=", "# More than one blame per proof is malicious. Boot", "will have None at positions of missing signatures). - To", "return can_pool def remove(self, client): # make sure to call", "= self.upnp, announcehost = self.announcehost) fusion.start() return len(chosen_clients) def new_client_job(self,", "list(self.waiting_pools[tier].pool) # Notify that we will start. 
for c in", "but checking failed with exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated", "slightly harder for one of the players to # broadcast", "time; - If no pools are filled then there is", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "pubkey = self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except AttributeError: client.error('signature submitted", "submitted\") if total_excess_fees != sum(component_contribs): skip_signatures = True self.print_error(\"problem detected:", "on a given tier (if more try to join, reject)", "self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components, component_feerate = Params.component_feerate, min_excess_fee =", "# TODO - make these configurable class Params: num_components =", "remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime >", "<reponame>MrNaif2018/Oregano #!/usr/bin/env python3 # # Oregano - a lightweight Ergon", "run for a while if many # checks against blockchain", "and/or sell copies of the Software, # and to permit", "ts = self.tags[t] ts.all_ += 1 if ts.pool >= t.maxsimul:", "4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1] # TODO", "same phase') if mtype == 'component': try: round_pubkey = self.round_pubkey", "documentation files # (the \"Software\"), to deal in the Software", "Sleep until end of covert signatures phase remtime = covert_T0", "5: client.error(\"Too many tags\") # Event for signalling us that", "= tuple(commit for commit,ci,cj in commitment_master_list) # Send blind signatures", "How many clients do we need as an absolute minimum", "+ ipb + tagbytes return super().__new__(cls, b) @property def maxsimul(self):", "contrib is (+- amount - fee). 
- Before start of", "all pools for t, pool in self.waiting_pools.items(): for c in", "= proofs[blame.which_proof] except IndexError: client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}')", "self.pubkeys = pubkeys for c in self.spawned_clients: c.got_submit = False", "commitment messages then process results results = collector.gather(deadline = covert_T0", "component submission to the next round's component submission? COVERT_CLIENT_TIMEOUT =", "')'}\") # mark all missing-signature components as bad. bad_inputs =", "same fuse? ip_max_simul_fuse = 3 # Guaranteed time to launch", "self.stopping: return # add this client to waiting pools for", "according to destination. proofs_to_relay = [list() for _ in self.clients]", "the same fuse? ip_max_simul_fuse = 3 # Guaranteed time to", "connection to last possible Tor component submission? # - how", "# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "t == self.tier_best: # this is the favoured pool, can", "each client's commitments ended up client_commit_indexes = [[None]*Params.num_components for _", "None]) ### self.print_error(f\"ending covert signature acceptance. {missing_sigs} missing :{'(' if", "= recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg def send(self, submsg,", "for comp, (sort_key, contrib) in component_master_list] component_contribs = [contrib for", "the proofs according to destination. proofs_to_relay = [list() for _", "this try: self.pool.remove(client) except KeyError: in_pool = False try: self.queue.remove(client)", "msg = client.recv('joinpools', timeout=120) if len(msg.tiers) == 0: client.error(\"No tiers\")", "varint for either inputs or outputs lists overhead = 60", "one pool, we don't have bias towards any particular tier", "signatures)) return True self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components))) ###", "don't care.) 
with self.lock: for t, pool in mytierpools.items(): if", "filled then there is no favoured fuse. (since fill time", "# Kick off the fusion. rng.shuffle(chosen_clients) fusion = FusionController(self. network,", "series\" values -- round numbers that are almost geometrically uniform", "self.results return ret def add(self, result): with self.lock: try: self.results.append(result)", "msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") # record for later c.blind_sig_requests =", "timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg", "= [] for i, proof in enumerate(proofs): dest_client_idx, dest_key_idx =", "_add_pool(self, client): self.pool.add(client) for t in client.tags: ts = self.tags[t]", "from waiting pools on failure (on success, we are already", "e) for b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars))", "an 'internal server error'. 
raise else: self.print_error(\"broadcast was successful!\") #", "mytierpools: # we left from best pool, so it might", "copy, modify, merge, # publish, distribute, sublicense, and/or sell copies", "offer SSL support, however a server admin may run an", "tier \"\"\" def __init__(self, fill_threshold, tag_max): self.pool = set() #", "client may be trying to DoS us by # making", "58 min_excess_fee = (overhead + min_safe_clients - 1) // min_safe_clients", "commitment\") # record for later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment =", "all_components, skip_signatures = True)) else: self.print_error(\"starting covert signature acceptance\") tx,", "a bad proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}):", "submitted at wrong time') except IndexError: raise ValidationError('which_input too high')", "above copyright notice and this permission notice shall be #", "FusionController(threading.Thread, PrintError): \"\"\" This controls the Fusion rounds running from", "idx, proofs, collector) _ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT", "done_on_fail = True): self.num_results = int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev", "long\") if not (0 < tag.limit < 6): client.error(\"Tag limit", "1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3,", "possible destinations list (all commitments, but leaving out the originating", "to the player. # we aren't collecting any results, rather", "isinstance(ret, str): self.print_error(f\"verified a bad proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad", "\"\"\" A basic server implementation for CashFusion. 
Does not natively", "the rounds.\"\"\" def __init__(self, config, network, bindhost, port, upnp =", "happen, we had plenty of time raise FusionError(\"way too slow\")", "wrong time') sort_key, contrib = check_covert_component(msg, round_pubkey, feerate) with self.lock:", "to kill all connections, call .stop(). \"\"\" def __init__(self, bindhost,", "amongst players, in the smallest fusion # (these overhead numbers", "which proof proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x,", "is on a different chain, please switch servers\") else: client.print_error(\"👀", "aren't collecting any results, rather just marking that # 'checking", "{component: contrib}, where contrib is (+- amount - fee). -", "import sys import threading import time import traceback from collections", "live: c.kill(\"too few remaining live players\") raise FusionError(\"too few remaining", "anonymizer # # Copyright (C) 2020 <NAME> # # Permission", "OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION", "= 300000 # sats tiers = [round(b*s) for b in", "a Fusion thread is started, the ClientThreads are passed over", "Protocol.T_START_COMPS - time.monotonic() if remtime > 0: time.sleep(remtime) # Upload", "time.monotonic() if remtime > 0: time.sleep(remtime) # Upload the full", "error'. raise else: self.print_error(\"broadcast was successful!\") # Give our transaction", "in proofs])) msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME)", "some 'ban score' to the player. # we aren't collecting", "OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #", "OR OTHER DEALINGS IN THE # SOFTWARE. \"\"\" A basic", "with error: {e}\") except Exception as e: self.print_error('Failed with exception!')", "timeout but that's OK. 
self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep", "pass try: del self.sighashes del self.pubkeys except AttributeError: pass def", "Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who didn't manage to give a", "# sats tiers = [round(b*s) for b in [10000, 100000,", "c in chosen_clients: c.start_ev.set() # Remove those clients from all", "self.signatures del self.signatures return ret def reset(self): try: del self.round_pubkey", "positions of missing signatures). - To reset the server for", "2020 <NAME> # # Permission is hereby granted, free of", "min_safe_clients # How many clients can share same tag on", "end of covert components phase, owner calls end_components, which returns", "with self.lock: chosen_clients = list(self.waiting_pools[tier].pool) # Notify that we will", "without sharing components. skip_signatures = False if len(all_components) != len(self.clients)*Params.num_components:", "client's commitments ended up client_commit_indexes = [[None]*Params.num_components for _ in", "will almost always be unique) \"\"\" with self.lock: time_best =", "be done, perhaps even still # running after run_round has", "# including without limitation the rights to use, copy, modify,", "the smallest fusion # (these overhead numbers assume op_return script", "we will start. for c in chosen_clients: c.start_ev.set() # Remove", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "bias towards any particular tier with self.lock: if self.stopping: return", "in commit_messages) with lock: expected_len = len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes)", "are checked, we # can start next round right away.", "the honest guy and all the other components were #", "IN # CONNECTION WITH THE SOFTWARE OR THE USE OR", "the # timing of a given input's signature submission. 
raise", "client, we'll let them slide...\") if self.stopping: return donation_address =", "5.6, 6.2, 6.8, 7.5, 8.2, 9.1] # TODO - make", "self.tags.get(t) if ts is not None and ts.all_ >= self.tag_max:", "commit,ci,cj in commitment_master_list) # Send blind signatures for c in", "= mytierpools[t] pool.add(client) if len(pool.pool) >= Params.max_clients: # pool filled", "{e}\") except Exception as e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for", "4.7, 5.6, 6.8, 8.2] E24 = [1.0, 1.1, 1.2, 1.3,", "many could we take until the result would exceed 100", "annhost_b, covert_port = annport, covert_ssl = False, server_time = begin_time))", "or substantial portions of the Software. # # THE SOFTWARE", "with collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points = [b.get_R() for b", "as e: nice_msg, = e.args server_msg = e.server_msg self.print_error(f\"could not", "blame in msg.blames: try: encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof]", "happened; as a placeholder, set this to startup time. self.reset_timer()", "to trigger setting fill_time self.fill_time = None # when did", "# Inter-fusion delay -- after starting any fusion, wait this", "in results: for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof,", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "on a different chain, please switch servers\") else: client.print_error(\"👀 No", "server at any time. 
By default, will bind to an", "= pubkeys for c in self.spawned_clients: c.got_submit = False def", "0, \"timings set up incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending", "provided a bad input: ' + reason) continue except Exception", "if time_best is None: self.tier_best_starttime = None else: self.tier_best_starttime =", "1000000, 10000000, 100000000] for s in E12] # How many", "Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime < 0: # really shouldn't", "def recv(self, *expected_msg_names, timeout=None): submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names,", "owner calls end_signatures, which returns a list of signatures (which", "proxy such as nginx for that purpose. \"\"\" import secrets", "Sort the components & contribs list, then separate it out.", "while True: with self.lock: if self.stopping or start_ev.is_set(): return tnow", "non-cryptographic purposes import random rng = random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client,", "begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain = annhost_b, covert_port", "c in self.clients: c.addjob(clientjob_goodbye, 'internal server error') finally: covert_server.stop() for", "this point we could blame the originator, however # blockchain", "float, this will almost always be unique) \"\"\" with self.lock:", "a float, this will almost always be unique) \"\"\" with", "at tier={self.tier}') covert_server = CovertServer(self.bindhost, upnp = self.upnp) try: annhost", "with collector: # an in-place sort by source commitment idx", "should only be 129 bytes long. 
# generate the possible", "the full commitment list; we're a bit generous with the", "to be done, perhaps even still # running after run_round", "fill_time self.fill_time = None # when did pool exceed fill_threshold", "self.clients: client.addjob(client_get_proofs, collector) results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT)", "we are already removed; on stop we don't care.) with", "server error'. raise else: self.print_error(\"broadcast was successful!\") # Give our", "self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True))", "== len(all_commitments) - Params.num_components # calculate the randomly chosen destinations,", "= config self.network = network self.announcehost = announcehost self.donation_address =", "server ought # to have a good connection to the", "a server admin may run an SSL server proxy such", "specific tier \"\"\" def __init__(self, fill_threshold, tag_max): self.pool = set()", "client # CashFusion - an advanced coin anonymizer # #", "signatures if s is None]) ### self.print_error(f\"ending covert signature acceptance.", "covert components phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic()", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", ". import fusion_pb2 as pb from .comms import send_pb, recv_pb,", "with a deadline. def __init__(self, num_results, done_on_fail = True): self.num_results", "< 0: # really shouldn't happen, we had plenty of", "= self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except AttributeError: client.error('signature submitted at", "inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash))", "don't waste more time. 
# Since nothing after this point", "tiers = [round(b*s) for b in [10000, 100000, 1000000, 10000000,", "'internal server error'. raise else: self.print_error(\"broadcast was successful!\") # Give", "sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position) from .validation import (check_playercommit,", "def start_fuse(self, tier): \"\"\" Immediately launch Fusion at the selected", "tag on a given tier (if more try to join,", "component submission? # - how long from one round's component", "in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients = len(results) collector", "-- after starting any fusion, wait this long before starting", "covert_Cpub # start to accept covert components covert_server.start_components(round_pubkey, Params.component_feerate) #", "ts.all_ >= self.tag_max: return \"too many clients with same tag\"", "= collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2)", "excess fee mismatch\") self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time,", "thread per connected client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype", "= list(mytierpools) rng.shuffle(mytiers) # shuffle the adding order so that", "Remove client from waiting pools on failure (on success, we", "try: round_pubkey = self.round_pubkey feerate = self.feerate _ = self.components", "for c in self.spawned_clients: c.got_submit = False def end_signatures(self): with", "time. 
self.reset_timer() def run(self): try: super().run() finally: self.waiting_pools.clear() # gc", "clients, bindhost, upnp = None, announcehost = None): super().__init__(name=\"FusionController\") self.network", "can share same tag on a given tier (if more", "use, copy, modify, merge, # publish, distribute, sublicense, and/or sell", "tags self.fill_threshold = fill_threshold # minimum number of pool clients", "client): can_pool = True for t in client.tags: ts =", "= time.monotonic() self.reset_timer() # Uncomment the following to: Remove from", "inputs is signed, we don't punish him # because he's", "results: for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx,", "+ Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def", "to permit persons to whom the Software is furnished to", "to a FusionController to run the rounds.\"\"\" def __init__(self, config,", "We allow a long timeout for clients to choose their", "raise ValidationError('bad transaction signature') if existing_sig: # We received a", "m in commit_messages) with lock: expected_len = len(seen_salthashes) + len(newhashes)", "only been above min_clients for a short time (unless pool", "collector) results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now,", "self.clients: c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)] lock =", "incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance. 
{len(component_master_list)}", "and go directly to blame, or maybe even restart /", "make these configurable class Params: num_components = 23 component_feerate =", "def new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if", "in client.tags: ts = self.tags[t] ts.all_ += 1 if ts.pool", "1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for i in bad_inputs) else:", "True if len(self.pool) < self.fill_threshold: self.fill_time = None for t", "donation_address = None): assert network assert isinstance(donation_address, (Address, type(None))) if", "{blame.which_proof} / {len(proofs)}') continue src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob", "coin anonymizer # # Copyright (C) 2020 <NAME> # #", "message to players; record the time we did this round_time", "without restriction, # including without limitation the rights to use,", "6.8, 7.5, 8.2, 9.1] # TODO - make these configurable", "pb.ServerMessage, submsg, timeout=timeout) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT)", "choose their pool. msg = client.recv('joinpools', timeout=120) if len(msg.tiers) ==", "for t, pool in self.waiting_pools.items(): for c in chosen_clients: pool.remove(c)", "{t: self.waiting_pools[t] for t in msg.tiers} except KeyError: if self.stopping:", "'ping': continue if client.got_submit: # We got a second submission", "the transaction! 
{nice_msg}\") except TimeoutException: self.print_error(\"timed out while trying to", "AttributeError: client.error('signature submitted at wrong time') client.send_ok() client.got_submit = True", "= 23 component_feerate = 1000 # sats/kB max_excess_fee = 300000", "if started at this tier self.queue = list() # clients", "directly to blame, or maybe even restart / end #", "# but we don't allow it to consume our CPU", "# gc def kick_missing_clients(self, goodclients, reason = None): baddies =", "= self.tags[t] ts.pool += 1 if len(self.pool) == self.fill_threshold: self.fill_time", "in self.clients if not c.dead] self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended", "for both inputs and outputs lists overhead = 62 elif", "Remove those clients from all pools for t, pool in", "how long from one round's component submission to the next", "__slots__ = ('pool', 'all_') def __init__(self): self.pool = 0 self.all_", "form the basis of our covert timeline. covert_T0 = time.monotonic()", "1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6,", "def _add_pool(self, client): self.pool.add(client) for t in client.tags: ts =", "generate the possible destinations list (all commitments, but leaving out", "ft is None: continue size = len(pool.pool) if size >=", "while True: msg, mtype = client.recv('component', 'signature', 'ping', timeout =", "for _,_,f in results) # Generate scrambled commitment list, but", "= [] else: # Default tag: this IP cannot be", "except FusionError as e: self.print_error(f\"Ended with error: {e}\") except Exception", "= set(m.salted_component_hash for m in commit_messages) with lock: expected_len =", "tier self.clients = list(clients) self.bindhost = bindhost self.upnp = upnp", "self.fill_time = time.monotonic() def add(self, client): can_pool = True for", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "c in self.spawned_clients: c.got_submit = False def end_components(self): with self.lock:", "scalars)) 
del c.blinds, c.blind_sig_requests del results, collector # Sleep a", "def clientjob_goodbye(client, text): # a gentler goodbye than killing if", "ft = pool.fill_time if ft is None: continue size =", "self def __exit__(self, exc_type, exc_value, traceback): if exc_type is not", "the following conditions: # # The above copyright notice and", ">= 2 * 0xfc: # the smallest fusion could require", "have private key. If more than one # signed, then", "self.waiting_pools[t] for t in msg.tiers} except KeyError: if self.stopping: return", "the basis of our covert timeline. covert_T0 = time.monotonic() self.print_error(f\"startround", "raise class ResultsCollector: # Collect submissions from different sources, with", "we are mainnet, etc. client.error(\"This server is on a different", "to accept covert components covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind nonces", "which returns a dict of {component: contrib}, where contrib is", "for t in client.tags: ts = self.tags[t] ts.all_ -= 1", "+ Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who didn't manage to give", "mtype == 'component': try: round_pubkey = self.round_pubkey feerate = self.feerate", "collector) _ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME", "one submission per connection # per phase. 
client.error('multiple submission in", "time to launch a fusion if the pool has stayed", "bool(done_on_fail) self.done_ev = threading.Event() self.lock = threading.Lock() self.results = []", "proofs, collector) _ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT +", "set up incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert component", "the overhead amongst players, in the smallest fusion # (these", "= upnp self.announcehost = announcehost self.daemon = True def sendall(self,", "elif min_safe_clients * num_components >= 0xfc: # the smallest fusion", "rng.shuffle(chosen_clients) fusion = FusionController(self. network, tier, chosen_clients, self.bindhost, upnp =", "= 120 # But don't start a fusion if it", "= False # How long covert connections are allowed to", "[1.0, 1.5, 2.2, 3.3, 4.7, 6.8] E12 = [1.0, 1.2,", "# when did pool exceed fill_threshold self.tag_max = tag_max #", "up dead clients self.clients = [c for c in self.clients", "self.lock: for t, pool in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if", "connection # per phase. client.error('multiple submission in same phase') if", ": Check the inputs and outputs to see if we", "could require 3-byte varint for either inputs or outputs lists", "client in self.clients: client.addjob(client_start, collector) # Record the time that", "for CashFusion. Does not natively offer SSL support, however a", "time.monotonic() # when the last fuse happened; as a placeholder,", "commitment_master_list) # Send blind signatures for c in self.clients: scalars", "what we have. 
bad_components = set() ### if skip_signatures: self.print_error(\"skipping", "== 'component': try: round_pubkey = self.round_pubkey feerate = self.feerate _", "self.print_error(\"problem detected: too few components submitted\") if total_excess_fees != sum(component_contribs):", "\"\"\" def __init__(self, bindhost, port=0, upnp = None): super().__init__(bindhost, port,", "msg.blames: try: encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof] except IndexError:", "return submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout)", "res is not None: client.error(res) for t in mytiers: pool", "= i collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_get_proofs(client,", "status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except: # Remove client from", "pass def new_client_job(self, client): client.got_submit = False while True: msg,", "Note, the rest of this function might run for a", "the pool(s) with the most number of players, - Choose", "we have added to pools, which may have changed the", "super().__new__(cls, b) @property def maxsimul(self): return self[0] class TagStatus: __slots__", "dst_key_idx=z) for x,y,z, _ in proofs])) msg = client.recv('blames', timeout", "'expecting input component' outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index)", "round, clients leave ... 
How many clients do we need", "results, collector # Sleep a bit before uploading commitments, as", "in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders in prevout_spenders.items(): if", "contrib) except AttributeError: client.error('component submitted at wrong time') else: assert", "in msg.blames)) != len(msg.blames): client.error('multiple blames point to same proof')", "sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT): for client in self.clients: client.addjob(clientjob_send,", "'startround' message to players; this # will form the basis", "in enumerate(signatures) if sig is None) # further, search for", "class WaitingPool: \"\"\" a waiting pool for a specific tier", "300000 # sats tiers = [round(b*s) for b in [10000,", "ommission. continue assert ret, 'expecting input component' outpoint = ret.prev_txid[::-1].hex()", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "tier (if more try to join, reject) max_tier_client_tags = 100", "except ValueError: return False else: in_pool = True if len(self.pool)", "pools are filled then there is no favoured fuse. (since", "remtime != inftime: status.time_remaining = round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses", "lock = threading.Lock() seen_salthashes = set() # Send start message", "src_client.kill('you provided a bad input: ' + reason) continue except", "away. collector.add(None) for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames,", "start at a special time remtime = min(remtime, self.tier_best_starttime -", "running after run_round has exited. For this reason we try", "filling more than one pool, we don't have bias towards", "they are put into the waiting pools. Once a Fusion", "the ommission. 
continue assert ret, 'expecting input component' outpoint =", "self.queue: for t in client.tags: ts = self.tags[t] if ts.pool", "2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None): submsg, mtype", "live players\") def run (self, ): self.print_error(f'Starting fusion with {len(self.clients)}", "None, donation_address = None): assert network assert isinstance(donation_address, (Address, type(None)))", "[(commit, ci, cj) for ci, (_, commitments, _) in enumerate(results)", "USE OR OTHER DEALINGS IN THE # SOFTWARE. \"\"\" A", "= len(pool.pool) if size >= size_best: if time_best is None", "try: check_input_electrumx(self.network, ret) except ValidationError as e: reason = f'{e.args[0]}", "msg = c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes", "while True: covert_server.reset() # Clean up dead clients self.clients =", "False, bad_components = sorted(bad_components))) ### self.print_error(f\"entering blame phase. bad components:", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN #", "if self.stopping: return donation_address = '' if isinstance(self.donation_address, Address): donation_address", "33 (session hash) ) if min_safe_clients * num_components >= 2", "timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self, msg): self.send(pb.Error(message =", "python3 # # Oregano - a lightweight Ergon client #", "# clients who will be put into fusion round if", "client.send(pb.ServerHello( num_components = Params.num_components, component_feerate = Params.component_feerate, min_excess_fee = Params.min_excess_fee,", "import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position) from .validation", "from oregano.util import PrintError, ServerError, TimeoutException from . 
import fusion_pb2", "not None and ts.all_ >= self.tag_max: return \"too many clients", "session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True)))) for i", "{missing_sigs} missing :{'(' if missing_sigs else ')'}\") # mark all", "above min_clients for a short time (unless pool is full).", "= len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len: c.error('duplicate", "Await commitment messages then process results results = collector.gather(deadline =", "sum((signatures[i] is not None) for i in spenders) != 1:", "Scan pools for the favoured fuse: - Out of the", "reason = None): baddies = set(self.clients).difference(goodclients) for c in baddies:", "for t in mytiers: pool = mytierpools[t] pool.add(client) if len(pool.pool)", "ts = self.tags.get(t) if ts is not None and ts.all_", "proofs_to_relay = [list() for _ in self.clients] for src_client, relays", "from collections import defaultdict import oregano.schnorr as schnorr from oregano.address", "len(sighashes) assert num_inputs == len(pubkeys) self.signatures = [None]*num_inputs self.sighashes =", "is not # allowed and we break the connection as", "As # an anti-spam measure we only allow one submission", "client # immediately since client may be trying to DoS", "DEALINGS IN THE # SOFTWARE. \"\"\" A basic server implementation", "with collector: msg = client.recv('myproofslist') seed = msg.random_number if sha256(seed)", "per phase. client.error('multiple submission in same phase') if mtype ==", "to call try_move_from_queue() after calling this try: self.pool.remove(client) except KeyError:", "# mark all missing-signature components as bad. 
bad_inputs = set(i", "= client.recv('joinpools', timeout=120) if len(msg.tiers) == 0: client.error(\"No tiers\") if", "a copy of this software and associated documentation files #", "return ret def reset(self): try: del self.round_pubkey del self.components del", "= client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT) if mtype ==", "msg.genesis_hash != get_current_genesis_hash(): # For now, msg.genesis_hash is optional and", "ClientThread made for them, and they are put into the", "self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly", "somewhat subjective. It would be # appropriate to add some", "False def end_components(self): with self.lock: ret = self.components del self.components", "done_on_fail = False) def client_get_proofs(client, collector): with collector: msg =", "src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if not collector.add((client,", "self.components del self.components return ret def start_signatures(self, sighashes, pubkeys): num_inputs", "commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})\") total_excess_fees =", "break else: self._add_pool(client) moved.append(client) for client in moved: self.queue.remove(client) class", "= covert_Cpub # start to accept covert components covert_server.start_components(round_pubkey, Params.component_feerate)", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "tfill_thresh if t == self.tier_best: # this is the favoured", "phase, owner calls end_components, which returns a dict of {component:", "however # blockchain checks are somewhat subjective. It would be", "players can they represent in the same fuse? 
ip_max_simul_fuse =", "client.error(\"wrong number of proofs\") if any(len(p) > 200 for p", "self.lock: try: self.results.append(result) except AttributeError: return False else: if len(self.fails)", "status.time_remaining = round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2)", "try_move_from_queue(self): # attempt to move clients from queue into pool", "for b in c.blinds], server_time = round_time )) msg =", "remtime > 0, \"timings set up incorrectly\" time.sleep(remtime) component_master_list =", "smallest fusion could require 3-byte varint for either inputs or", "components covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind nonces (slow!) for c", "= annhost.encode('ascii') annport = covert_server.port covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert", "{nice_msg}\") except TimeoutException: self.print_error(\"timed out while trying to broadcast transaction!", "blame the originator, however # blockchain checks are somewhat subjective.", "# this is the favoured pool, can start at a", "check_add(self, client): for t in client.tags: ts = self.tags.get(t) if", "400 # whether to print a lot of logs noisy", "super().__init__(bindhost, port, CovertClientThread, upnp = upnp) self.round_pubkey = None def", "# Give our transaction a small head start in relaying,", "time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs = len([s for s in", "dest_key_idx, src_client)) live_clients = len(results) collector = ResultsCollector(live_clients, done_on_fail =", "min_excess_fee = Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers = Params.tiers, donation_address", "if res is not None: client.error(res) for t in mytiers:", "= time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now, repackage the proofs according", "max_clients = (100000 - 12) // (num_components * 173) #", "def gather(self, *, deadline): remtime = 
deadline - time.monotonic() self.done_ev.wait(max(0.,", "next one (unless hit max time or pool is full).", "self.tier_best: # this is the favoured pool, can start at", "Check the inputs and outputs to see if we even", "we should just skip the # signing phase and go", "proofs])) msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) #", "a list of signatures (which will have None at positions", "# It might be we already have this signature. This", "num_components >= 2 * 0xfc: # the smallest fusion could", "logs noisy = False # How long covert connections are", "for t, pool in self.waiting_pools.items(): ft = pool.fill_time if ft", "pool in mytierpools.items(): if client not in pool.pool: continue status", "from .protocol import Protocol from .util import (FusionError, sha256, calc_initial_hash,", "msg, timeout) def check_client_count(self,): live = [c for c in", "in component_master_list] component_contribs = [contrib for comp, (sort_key, contrib) in", "raise ValidationError('which_input too high') sig = msg.txsignature if len(sig) !=", "list(covert_server.end_signatures()) missing_sigs = len([s for s in signatures if s", "we already have this signature. This is fine # since", "= list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance. {len(component_master_list)} received.\") # Sort", "# But don't start a fusion if it has only", "announcehost = self.announcehost) fusion.start() return len(chosen_clients) def new_client_job(self, client): client_ip", "of missing signatures). - To reset the server for a", "each commitment originated. 
commitment_master_list = [(commit, ci, cj) for ci,", "__init__(self, num_results, done_on_fail = True): self.num_results = int(num_results) self.done_on_fail =", ".util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position) from", "IP, how many players can they represent in the same", "been above min_clients for a short time (unless pool is", "for t in client.tags: ts = self.tags[t] ts.all_ += 1", "our transaction a small head start in relaying, before sharing", "sighashes self.pubkeys = pubkeys for c in self.spawned_clients: c.got_submit =", "client.recv('clienthello') if msg.version != Protocol.VERSION: client.error(\"Mismatched protocol version, please upgrade\")", "for c in self.spawned_clients: c.got_submit = False def end_components(self): with", "Params.num_components, component_feerate = Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee = Params.max_excess_fee,", "FusionController(self. network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost =", "self.tier_best in mytierpools: # we left from best pool, so", "connection to the EC server. Report this back to clients", "out the # timing of a given input's signature submission.", "cleanup for no-longer-used tags del self.tags[t] return True def try_move_from_queue(self):", "<NAME> # # Permission is hereby granted, free of charge,", "of covert components phase, call start_components. 
- To signal the", "self.fill_time = None for t in client.tags: ts = self.tags[t]", "def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names,", "src_commitment_idx=y, dst_key_idx=z) for x,y,z, _ in proofs])) msg = client.recv('blames',", "skip_signatures: self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures =", "= all_components, skip_signatures = True)) else: self.print_error(\"starting covert signature acceptance\")", "get a ClientThread made for them, and they are put", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "starting a fusion? min_clients = 8 # If all clients", "True): self.num_results = int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev = threading.Event()", "clients from queue into pool moved = [] for client", "fusion_pb2 as pb from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer,", "exception!') traceback.print_exc(file=sys.stderr) for c in self.clients: c.addjob(clientjob_goodbye, 'internal server error')", "pool.check_add(client) if res is not None: client.error(res) for t in", "# Uncomment the following to: Remove from spawned clients list,", "in enumerate(results) for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit", "res = pool.check_add(client) if res is not None: client.error(res) for", "[b.get_R() for b in c.blinds], server_time = round_time )) msg", "overhead amongst players, in the smallest fusion # (these overhead", "or above min_clients for this long. start_time_max = 1200 #", "an in-place sort by source commitment idx removes ordering correlations", "calls end_components, which returns a dict of {component: contrib}, where", "absolute minimum (for privacy)? 
min_safe_clients = 6 # Choose the", "None): assert network assert isinstance(donation_address, (Address, type(None))) if not schnorr.has_fast_sign()", "self.lock = threading.Lock() self.results = [] self.fails = [] def", "True: covert_server.reset() # Clean up dead clients self.clients = [c", "time. By default, will bind to an ephemeral port. -", "out while trying to broadcast transaction! misconfigured?\") # This probably", "the fusion can continue independently of waiting server. # self.spawned_clients.difference_update(chosen_clients)", "not # allowed and we break the connection as a", "3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1]", "client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More than one", "= COVERT_CLIENT_TIMEOUT) if mtype == 'ping': continue if client.got_submit: #", "IN THE # SOFTWARE. \"\"\" A basic server implementation for", "1 if ts.pool >= t.maxsimul: can_pool = False if can_pool:", "pool, so it might not be best anymore. self.reset_timer() raise", "try: encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof] except IndexError: client.kill(f'bad", "rand_position) from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx) #", "component acceptance. {len(component_master_list)} received.\") # Sort the components & contribs", "40 # used for non-cryptographic purposes import random rng =", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "wrong time') except IndexError: raise ValidationError('which_input too high') sig =", "us to find out the # timing of a given", "are put into the waiting pools. 
Once a Fusion thread", "Generate scrambled commitment list, but remember exactly where each commitment", "queue) def check_add(self, client): for t in client.tags: ts =", "a small head start in relaying, before sharing the #", "few components submitted\") if total_excess_fees != sum(component_contribs): skip_signatures = True", "covert timeline. covert_T0 = time.monotonic() self.print_error(f\"startround sent at {time.time()}; accepting", "timeout=None): submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg,", "self.signatures[msg.which_input] = sig except AttributeError: client.error('signature submitted at wrong time')", "())) >= self.num_results: self.done_ev.set() def gather(self, *, deadline): remtime =", "To reset the server for a new round, call .reset();", "msg.txsignature if len(sig) != 64: raise ValidationError('signature length is wrong')", "len(pool.pool) >= Params.max_clients: # pool filled up to the maximum", "spawned clients list, so that the fusion can continue independently", "b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes return super().__new__(cls,", "not broadcast the transaction! {nice_msg}\") except TimeoutException: self.print_error(\"timed out while", "any(len(p) > 200 for p in proofs): client.error(\"too-long proof\") #", "queue into pool moved = [] for client in self.queue:", "pools for the favoured fuse: - Out of the pool(s)", "and associated documentation files # (the \"Software\"), to deal in", "OTHER DEALINGS IN THE # SOFTWARE. 
\"\"\" A basic server", "sum(component_contribs): skip_signatures = True self.print_error(\"problem detected: excess fee mismatch\") self.last_hash", "self.donation_address = donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t", "range(Params.num_components)] lock = threading.Lock() seen_salthashes = set() # Send start", "signature') if existing_sig: # We received a distinct valid signature.", "enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector) _ = collector.gather(deadline =", "(100000 - 12) // (num_components * 173) # Every round,", "right away. collector.add(None) for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)):", "# - how long from first connection to last possible", "client.error('multiple submission in same phase') if mtype == 'component': try:", "4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1] #", "E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8] E12 =", "0: self.start_fuse(t) return elif remtime != inftime: status.time_remaining = round(remtime)", "try: self.signatures[msg.which_input] = sig except AttributeError: client.error('signature submitted at wrong", "removes ordering correlations about which client sent which proof proofs.sort(key", "from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx) # Resistor", "- time.monotonic() if remtime < 0: # really shouldn't happen,", "tier): \"\"\" Immediately launch Fusion at the selected tier. \"\"\"", "ret, 'expecting input component' outpoint = ret.prev_txid[::-1].hex() + ':' +", "minimum number of pool clients to trigger setting fill_time self.fill_time", "bindhost self.upnp = upnp self.announcehost = announcehost self.daemon = True", "except AttributeError: return False else: if len(self.fails) + len(self.results) >=", "# will form the basis of our covert timeline. 
covert_T0", "covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind nonces (slow!) for c in", "self.components[msg.component] = (sort_key, contrib) except AttributeError: client.error('component submitted at wrong", "if len(sig) != 64: raise ValidationError('signature length is wrong') #", "is not None: client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread", "of a given input's signature submission. raise ValidationError('conflicting valid signature')", "submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def send_error(self, msg): self.send(pb.Error(message", "else: # the smallest fusion will use 1-byte varint for", "waiting server. # self.spawned_clients.difference_update(chosen_clients) # Kick off the fusion. rng.shuffle(chosen_clients)", "by client, we'll let them slide...\") if self.stopping: return donation_address", "to same proof') # Note, the rest of this function", "if ci != myindex] N = len(possible_commitment_destinations) assert N ==", "1 + 5 (lokad) + 33 (session hash) ) if", "submissions from different sources, with a deadline. def __init__(self, num_results,", "we sent 'startround' message to players; this # will form", "3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8,", "pubkeys for c in self.spawned_clients: c.got_submit = False def end_signatures(self):", "c.got_submit = False def end_components(self): with self.lock: ret = self.components", "pubkeys): num_inputs = len(sighashes) assert num_inputs == len(pubkeys) self.signatures =", "the EC server. 
Report this back to clients # as", "None): super().__init__(name=\"FusionController\") self.network = network self.tier = tier self.clients =", "= status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except: # Remove client", "chosen_clients: c.start_ev.set() # Remove those clients from all pools for", "in self.clients: client.addjob(client_get_proofs, collector) results = collector.gather(deadline = time.monotonic() +", "= ipstr.encode() b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes", "{src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}') continue if src_client.dead:", "before starting the next one (unless hit max time or", "self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None): submsg, mtype =", "# Oregano - a lightweight Ergon client # CashFusion -", "the smallest fusion will use 1-byte varint for both inputs", "fine ({outpoint})\") # At this point we could blame the", "try_move_from_queue() after calling this try: self.pool.remove(client) except KeyError: in_pool =", "= client.recv('clienthello') if msg.version != Protocol.VERSION: client.error(\"Mismatched protocol version, please", "self.done_ev = threading.Event() self.lock = threading.Lock() self.results = [] self.fails", "i collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_get_proofs(client, collector):", "CovertClientThread, upnp = upnp) self.round_pubkey = None def start_components(self, round_pubkey,", "0 for t, pool in self.waiting_pools.items(): ft = pool.fill_time if", "# # Copyright (C) 2020 <NAME> # # Permission is", "have reasonable # privacy with what we have. bad_components =", "ret def add(self, result): with self.lock: try: self.results.append(result) except AttributeError:", "has stayed at or above min_clients for this long. 
start_time_max", "client_ip = client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if msg.version != Protocol.VERSION:", "pool = mytierpools[t] pool.add(client) if len(pool.pool) >= Params.max_clients: # pool", "they represent in the same fuse? ip_max_simul_fuse = 3 #", "self.done_ev.set() def gather(self, *, deadline): remtime = deadline - time.monotonic()", "cj) for ci, (_, commitments, _) in enumerate(results) for cj,commit", "del self.pubkeys except AttributeError: pass def new_client_job(self, client): client.got_submit =", "a new phase started. As # an anti-spam measure we", "self.start_fuse(t) return # we have added to pools, which may", "in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector) _ = collector.gather(deadline", "src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate) except ValidationError as e: self.print_error(\"got", "fusion, wait this long before starting the next one (unless", "src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof] except IndexError: client.kill(f'bad proof index", "send_error(self, msg): self.send(pb.Error(message = msg), timeout=5) def error(self, msg): self.send_error(msg)", "moved = [] for client in self.queue: for t in", "Since nothing after this point can report back to the", "for c, _, _ in results] self.check_client_count() self.print_error(f\"got commitments from", "c in chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update timing info self.t_last_fuse", "client.start_ev = start_ev if client_ip.startswith('127.'): # localhost is whitelisted to", "pool has stayed at or above min_clients for this long.", "time_best is None or ft < time_best or size >", "= time.monotonic() self.print_error(f\"startround sent at {time.time()}; accepting covert components\") #", "client.error('multiple blames point to same proof') # Note, the rest", "AttributeError: client.error('component 
submitted at wrong time') else: assert mtype ==", "make sense with one player. for c in self.clients: c.kill('blame", "self.print_error(\"completed the transaction! \" + txid) try: self.network.broadcast_transaction2(tx, timeout=3) except", "__enter__(self, ): return self def __exit__(self, exc_type, exc_value, traceback): if", "ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index) try: check_input_electrumx(self.network, ret) except ValidationError", "self.tags[t] ts.all_ += 1 if ts.pool >= t.maxsimul: can_pool =", "announcehost self.donation_address = donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for", "':' + str(ret.prev_index) try: check_input_electrumx(self.network, ret) except ValidationError as e:", "def try_move_from_queue(self): # attempt to move clients from queue into", "score' to the player. # we aren't collecting any results,", "to deal in the Software without restriction, # including without", "# Update timing info self.t_last_fuse = time.monotonic() self.reset_timer() # Uncomment", "= check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash for m", "collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") # record for later c.blind_sig_requests", "self.pubkeys except AttributeError: pass def new_client_job(self, client): client.got_submit = False", "to the following conditions: # # The above copyright notice", "How many clients do we want before starting a fusion?", "self.round_pubkey = round_pubkey for c in self.spawned_clients: c.got_submit = False", "round numbers that are almost geometrically uniform E6 = [1.0,", "launch a fusion if the pool has stayed at or", "t, pool in self.waiting_pools.items(): for c in chosen_clients: pool.remove(c) pool.try_move_from_queue()", "else self.announcehost annhost_b = annhost.encode('ascii') annport = covert_server.port covert_server.noisy =", 
"input component' outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index) try:", "c.kill(\"too few remaining live players\") raise FusionError(\"too few remaining live", "commitments). myindex = self.clients.index(client) possible_commitment_destinations = [(ci,cj) for commit, ci,", "fusion could require 3-byte varint for both inputs and outputs", "Once a Fusion thread is started, the ClientThreads are passed", "self.clients: client.addjob(clientjob_send, msg, timeout) def check_client_count(self,): live = [c for", "= self.components except AttributeError: client.error('component submitted at wrong time') sort_key,", "None else self.announcehost annhost_b = annhost.encode('ascii') annport = covert_server.port covert_server.noisy", "a bit generous with the timeout but that's OK. self.sendall(pb.AllCommitments(initial_commitments", "No genesis hash declared by client, we'll let them slide...\")", "fee). - Before start of covert signatures phase, owner calls", "self.print_error(f\"entering blame phase. 
bad components: {bad_components}\") if len(self.clients) < 2:", "(_, commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list)", "proofs, collector): with collector: # an in-place sort by source", "'results', ())) >= self.num_results: self.done_ev.set() def gather(self, *, deadline): remtime", "time.monotonic() self.print_error(f\"startround sent at {time.time()}; accepting covert components\") # Await", "players, in the smallest fusion # (these overhead numbers assume", "set(self.clients).difference(goodclients) for c in baddies: c.kill(reason) def run_round(self, covert_server): covert_priv,", "in moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for clients waiting to", "tier_best = None size_best = 0 for t, pool in", "else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools = {t: self.waiting_pools[t]", "broadcast a malleated version by re-signing one of their inputs.", "proof') # Note, the rest of this function might run", "FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT", "can_pool: self._add_pool(client) else: self.queue.append(client) return can_pool def remove(self, client): #", "collector): with collector: # an in-place sort by source commitment", "tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost) fusion.start()", "fee mismatch\") self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments,", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE", "is no favoured fuse. (since fill time is a float,", "as bad. 
bad_inputs = set(i for i,sig in enumerate(signatures) if", "schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port,", "one of the players to # broadcast a malleated version", "same tag (in pool and queue) def check_add(self, client): for", "Params.start_time_spacing) self.tier_best = tier_best def start_fuse(self, tier): \"\"\" Immediately launch", "in self.spawned_clients: c.got_submit = False def end_signatures(self): with self.lock: ret", "): self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}') covert_server =", "self.signatures return ret def reset(self): try: del self.round_pubkey del self.components", "{len(component_master_list)} received.\") # Sort the components & contribs list, then", "self.print_error('Ended successfully!') except FusionError as e: self.print_error(f\"Ended with error: {e}\")", "allowed and we break the connection as a result. #", "full). start_time_spacing = 120 # But don't start a fusion", "= ('pool', 'all_') def __init__(self): self.pool = 0 self.all_ =", "[c for c, _, _ in results] self.check_client_count() self.print_error(f\"got commitments", "Uncomment the following to: Remove from spawned clients list, so", "= (100000 - 12) // (num_components * 173) # Every", "filled up to the maximum size, so start immediately self.start_fuse(t)", "smallest fusion will use 1-byte varint for both inputs and", "tiers\") if len(msg.tags) > 5: client.error(\"Too many tags\") # Event", "at positions of missing signatures). 
- To reset the server", "= True def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT): for client", "deadline): remtime = deadline - time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock:", "signatures phase, owner calls end_signatures, which returns a list of", "to players; record the time we did this round_time =", "= round_pubkey, blind_nonce_points = [b.get_R() for b in c.blinds], server_time", "blind signatures for c in self.clients: scalars = [b.sign(covert_priv, e)", "way third parties can't abuse us to find out the", "# This probably indicates misconfiguration since fusion server ought #", "for c in self.clients: c.addjob(clientjob_goodbye, 'internal server error') finally: covert_server.stop()", "WaitingPool: \"\"\" a waiting pool for a specific tier \"\"\"", "end_components, which returns a dict of {component: contrib}, where contrib", "their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures = signatures)) return", "from queue into pool moved = [] for client in", "fusion round if started at this tier self.queue = list()", "ip = '' if tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit))", "len(chosen_clients) def new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0] msg = client.recv('clienthello')", "= set() # Send start message to players; record the", "Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client, text): # a gentler goodbye", "timeout=timeout) return submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg,", "submission to the next round's component submission? COVERT_CLIENT_TIMEOUT = 40", "e.server_msg self.print_error(f\"could not broadcast the transaction! {nice_msg}\") except TimeoutException: self.print_error(\"timed", "with one player. 
for c in self.clients: c.kill('blame yourself!') return", "end_signatures, which returns a list of signatures (which will have", "with lock: expected_len = len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes)", "# can start next round right away. collector.add(None) for idx,", "claimed: {blame.blame_reason!r})') continue if isinstance(ret, str): self.print_error(f\"verified a bad proof", "3-byte varint for both inputs and outputs lists overhead =", "up to the maximum size, so start immediately self.start_fuse(t) return", "proofs[blame.which_proof] except IndexError: client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}') continue", "return donation_address = '' if isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string()", "except IndexError: raise ValidationError('which_input too high') sig = msg.txsignature if", "and outputs to see if we even have reasonable #", "is the favoured pool, can start at a special time", "server_time = round_time )) msg = c.recv('playercommit') commit_messages = check_playercommit(msg,", "acceptance\") tx, input_indices = tx_from_components(all_components, session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41,", "try: super().run() finally: self.waiting_pools.clear() # gc clean def reset_timer(self, ):", "= 60 else: # the smallest fusion will use 1-byte", "New clients get a ClientThread made for them, and they", "almost geometrically uniform E6 = [1.0, 1.5, 2.2, 3.3, 4.7,", "prev_client_count = len(self.clients) self.clients = [c for c, _, _", "client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools = {t: self.waiting_pools[t] for t", "for i, (commit, ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i", "b) @property def maxsimul(self): return self[0] class TagStatus: __slots__ =", "FusionError as e: self.print_error(f\"Ended with error: {e}\") except Exception 
as", "join, reject) max_tier_client_tags = 100 # For a given IP,", "a given tier (if more try to join, reject) max_tier_client_tags", "of charge, to any person # obtaining a copy of", "declares the genesis_hash, we # do indeed disallow them connecting", "components & contribs list, then separate it out. component_master_list.sort(key=lambda x:x[1][0])", "above min_clients for this long. start_time_max = 1200 # Inter-fusion", "self.network = network self.announcehost = announcehost self.donation_address = donation_address self.waiting_pools", "in pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players =", "in msg.blames: try: encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof] except", "is hereby granted, free of charge, to any person #", "is signed, we don't punish him # because he's the", "> 5: client.error(\"Too many tags\") # Event for signalling us", "point we could blame the originator, however # blockchain checks", "have. bad_components = set() ### if skip_signatures: self.print_error(\"skipping covert signature", "cannot be present in too many fuses. client.tags = [ClientTag(client_ip,", "separate it out. component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp for comp,", "Protocol from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components,", "is None: continue size = len(pool.pool) if size >= size_best:", "software and associated documentation files # (the \"Software\"), to deal", "commitments, as clients are doing this. remtime = covert_T0 +", "# they should only be 129 bytes long. # generate", "# Record the time that we sent 'startround' message to", "100 kB standard tx size limitation? 
max_clients = (100000 -", "raise FusionError(\"too few remaining live players\") def run (self, ):", "annhost = covert_server.host if self.announcehost is None else self.announcehost annhost_b", "= True)))) for i in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for", "self.feerate = feerate self.round_pubkey = round_pubkey for c in self.spawned_clients:", "proofs\") if any(len(p) > 200 for p in proofs): client.error(\"too-long", "in baddies: c.kill(reason) def run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub =", "that purpose. \"\"\" import secrets import sys import threading import", "Params.num_components) newhashes = set(m.salted_component_hash for m in commit_messages) with lock:", "# just imposters who didn't have private key. If more", "to waiting pools for pool in mytierpools.values(): res = pool.check_add(client)", "N == len(all_commitments) - Params.num_components # calculate the randomly chosen", "t in client.tags: ts = self.tags[t] if ts.pool >= t.maxsimul:", "kill all connections, call .stop(). \"\"\" def __init__(self, bindhost, port=0,", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", "self.tier_best_starttime - tnow) if remtime <= 0: self.start_fuse(t) return elif", "client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})') continue if isinstance(ret,", "continue if isinstance(ret, str): self.print_error(f\"verified a bad proof (for {src_commitment_idx}):", "del self.results return ret def add(self, result): with self.lock: try:", "import random rng = random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout", "THE # SOFTWARE. 
\"\"\" A basic server implementation for CashFusion.", "i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders in", "Protocol.VERSION: client.error(\"Mismatched protocol version, please upgrade\") if msg.genesis_hash: if msg.genesis_hash", "for client in self.clients: client.addjob(clientjob_send, msg, timeout) def check_client_count(self,): live", "self.signatures[msg.which_input] except AttributeError: client.error('signature submitted at wrong time') except IndexError:", "size_best = 0 for t, pool in self.waiting_pools.items(): ft =", "the selected tier. \"\"\" with self.lock: chosen_clients = list(self.waiting_pools[tier].pool) #", "__init__(self, network, tier, clients, bindhost, upnp = None, announcehost =", "component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance. {len(component_master_list)} received.\") #", "localhost is whitelisted to allow unlimited access client.tags = []", "bad_inputs) else: for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0]", "tier_best = t size_best = size if time_best is None:", "function might run for a while if many # checks", "error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class ClientTag(bytes): \"\"\"", "us check many inputs against blockchain. if len(msg.blames) > len(proofs):", "continue if client.got_submit: # We got a second submission before", "statuses[t] = status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except: # Remove", "# # Oregano - a lightweight Ergon client # CashFusion", "self.waiting_pools.clear() # gc clean def reset_timer(self, ): \"\"\" Scan pools", "we don't have bias towards any particular tier with self.lock:", "sorted(bad_components))) ### self.print_error(f\"entering blame phase. 
bad components: {bad_components}\") if len(self.clients)", "pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components =", "collector: msg = client.recv('myproofslist') seed = msg.random_number if sha256(seed) !=", "= covert_T0 + Protocol.T_START_COMPS - time.monotonic() if remtime > 0:", "prevout and claimed pubkey). prevout_spenders = defaultdict(list) for i, inp", "__init__(self, bindhost, port=0, upnp = None): super().__init__(bindhost, port, CovertClientThread, upnp", "- Choose the pool with the earliest fill time; -", "index {blame.which_proof} / {len(proofs)}') continue src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx]", "timeout=120) if len(msg.tiers) == 0: client.error(\"No tiers\") if len(msg.tags) >", "= defaultdict(list) for i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for", "A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION", "this software and associated documentation files # (the \"Software\"), to", "gc def kick_missing_clients(self, goodclients, reason = None): baddies = set(self.clients).difference(goodclients)", "of the inputs is signed, we don't punish him #", "still # running after run_round has exited. 
For this reason", "be # included in all copies or substantial portions of", "session_hash = session_hash)) # Sleep until end of covert signatures", "covert components phase, owner calls end_components, which returns a dict", "def reset_timer(self, ): \"\"\" Scan pools for the favoured fuse:", "maximum interval between messages: # - how long from first", "t in client.tags: ts = self.tags[t] ts.pool += 1 if", "!= Protocol.VERSION: client.error(\"Mismatched protocol version, please upgrade\") if msg.genesis_hash: if", "remtime <= 0: self.start_fuse(t) return elif remtime != inftime: status.time_remaining", "skip the # signing phase and go directly to blame,", "distinct valid signature. This is not # allowed and we", "submission? COVERT_CLIENT_TIMEOUT = 40 # used for non-cryptographic purposes import", "self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended successfully!') except FusionError as e:", "if sha256(seed) != client.random_number_commitment: client.error(\"seed did not match commitment\") proofs", "annhost_b = annhost.encode('ascii') annport = covert_server.port covert_server.noisy = Params.noisy covert_server.start()", "# localhost is whitelisted to allow unlimited access client.tags =", "OR IN # CONNECTION WITH THE SOFTWARE OR THE USE", "collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound())", "if self.stopping: return # add this client to waiting pools", "({outpoint})\") else: self.print_error(f\"player indicated bad input but it was fine", "we want before starting a fusion? 
min_clients = 8 #", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash)) #", "len(pubkeys) self.signatures = [None]*num_inputs self.sighashes = sighashes self.pubkeys = pubkeys", "stayed at or above min_clients for this long. start_time_max =", "detected: too few components submitted\") if total_excess_fees != sum(component_contribs): skip_signatures", "order so that if filling more than one pool, we", "bad input: {reason}\") src_client.kill('you provided a bad input: ' +", "* 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None): submsg,", "until end of covert signatures phase remtime = covert_T0 +", "possible Tor component submission? # - how long from one", "just marking that # 'checking finished' so that if all", "overhead = 62 elif min_safe_clients * num_components >= 0xfc: #", "ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector = ResultsCollector(len(self.clients),", "len(results) collector = ResultsCollector(live_clients, done_on_fail = False) def client_get_blames(client, myindex,", "few remaining live players\") def run (self, ): self.print_error(f'Starting fusion", "upnp = self.upnp) try: annhost = covert_server.host if self.announcehost is", "str(ret.prev_index) try: check_input_electrumx(self.network, ret) except ValidationError as e: reason =", "be # appropriate to add some 'ban score' to the", "feerate) with self.lock: try: self.components[msg.component] = (sort_key, contrib) except AttributeError:", "except ValidationError as e: reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for", "rng = random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT):", "assume op_return 
script size of 1 + 5 (lokad) +", "to run the rounds.\"\"\" def __init__(self, config, network, bindhost, port,", "to the next round's component submission? COVERT_CLIENT_TIMEOUT = 40 #", "set() ### if skip_signatures: self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components =", "idx removes ordering correlations about which client sent which proof", "self.upnp = upnp self.announcehost = announcehost self.daemon = True def", "into the waiting pools. Once a Fusion thread is started,", "outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index) try: check_input_electrumx(self.network, ret)", "return True class FusionController(threading.Thread, PrintError): \"\"\" This controls the Fusion", "nginx for that purpose. \"\"\" import secrets import sys import", "{t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers} self.t_last_fuse = time.monotonic()", "client.error(\"Tag limit out of range\") ip = '' if tag.no_ip", "would exceed 100 kB standard tx size limitation? max_clients =", "try: self.results.append(result) except AttributeError: return False else: if len(self.fails) +", "collections import defaultdict import oregano.schnorr as schnorr from oregano.address import", "if pool.remove(client): pool.try_move_from_queue() if self.tier_best in mytierpools: # we left", "class FusionController(threading.Thread, PrintError): \"\"\" This controls the Fusion rounds running", "t in client.tags: ts = self.tags[t] ts.all_ -= 1 if", "length is wrong') # It might be we already have", "covert_server.stop() for c in self.clients: c.addjob(clientjob_goodbye, None) self.clients = []", "- To signal the end of covert signatures phase, owner", "= 0 self.all_ = 0 class WaitingPool: \"\"\" a waiting", "if in_pool: ts.pool -= 1 if ts.all_ == 0: #", "signatures phase, owner calls start_signatures. 
- To signal the end", "be a resubmission after ack failed delivery, # but we", "WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "killing if text is not None: client.send_error(text) raise client.Disconnect class", "# # The above copyright notice and this permission notice", "a fusion if it has only been above min_clients for", "self.done_ev.set() return True class FusionController(threading.Thread, PrintError): \"\"\" This controls the", "self.tier = tier self.clients = list(clients) self.bindhost = bindhost self.upnp", "GenericServer, get_current_genesis_hash from .protocol import Protocol from .util import (FusionError,", "tier with self.lock: if self.stopping: return # add this client", "server proxy such as nginx for that purpose. \"\"\" import", "noisy = False # How long covert connections are allowed", "might be a resubmission after ack failed delivery, # but", "(FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components, rand_position) from .validation import", "Sleep a bit before uploading commitments, as clients are doing", "*expected_msg_names, timeout=timeout) return submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage,", "a different chain, please switch servers\") else: client.print_error(\"👀 No genesis", "# Copyright (C) 2020 <NAME> # # Permission is hereby", "t size_best = size if time_best is None: self.tier_best_starttime =", "call .reset(); to kill all connections, call .stop(). 
\"\"\" def", "had plenty of time raise FusionError(\"way too slow\") time.sleep(remtime) signatures", "THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "geometrically uniform E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8]", "timeout) def check_client_count(self,): live = [c for c in self.clients", "full commitment list; we're a bit generous with the timeout", "this reason we try to # not reference self.<variables> that", "8.2] E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8,", "into fusion round if started at this tier self.queue =", "# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "+ txid) try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError as e: nice_msg,", "the maximum size, so start immediately self.start_fuse(t) return # we", "have a good connection to the EC server. Report this", "COVERT_CLIENT_TIMEOUT) if mtype == 'ping': continue if client.got_submit: # We", "sent 'startround' message to players; this # will form the", "sort_key, contrib = check_covert_component(msg, round_pubkey, feerate) with self.lock: try: self.components[msg.component]", "client_ip.startswith('127.'): # localhost is whitelisted to allow unlimited access client.tags", "else: self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best", "True class FusionController(threading.Thread, PrintError): \"\"\" This controls the Fusion rounds", "slide...\") if self.stopping: return donation_address = '' if isinstance(self.donation_address, Address):", "enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector = ResultsCollector(len(self.clients), done_on_fail = False)", "of covert signatures phase, owner calls start_signatures. - To signal", "distribute, sublicense, and/or sell copies of the Software, # and", "in self.clients] for src_client, relays in results: for proof, src_commitment_idx,", "change. 
for blame in msg.blames: try: encproof, src_commitment_idx, dest_key_idx, src_client", "# really shouldn't happen, we had plenty of time raise", "super().__init__(bindhost, port, ClientThread, upnp = upnp) self.config = config self.network", "self.network = network self.tier = tier self.clients = list(clients) self.bindhost", "self.round_pubkey = None def start_components(self, round_pubkey, feerate): self.components = dict()", "): \"\"\" Scan pools for the favoured fuse: - Out", "clients who will be put into fusion round if started", "of covert components phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS -", "# Choose the minimum excess fee based on dividing the", "announcehost = None, donation_address = None): assert network assert isinstance(donation_address,", "pool in mytierpools.values(): res = pool.check_add(client) if res is not", "players\") def run (self, ): self.print_error(f'Starting fusion with {len(self.clients)} players", "# running after run_round has exited. For this reason we", "calls end_signatures, which returns a list of signatures (which will", "begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) #", "Params.start_time_max for t, pool in mytierpools.items(): if client not in", "Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash for m in commit_messages) with", "the inputs and outputs to see if we even have", "validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate) except ValidationError as", "it out. 
component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp for comp, (sort_key,", "+ 33 (session hash) ) if min_safe_clients * num_components >=", "(client, proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector) _", "len(pool.pool), min_players = Params.min_clients) remtime = inftime if pool.fill_time is", "= covert_server.host if self.announcehost is None else self.announcehost annhost_b =", "without activity. # note this needs to consider the maximum", "# we have added to pools, which may have changed", "self.fill_threshold: self.fill_time = time.monotonic() def add(self, client): can_pool = True", "= Params.num_components, component_feerate = Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee =", "if bad_inputs: bad_components.update(input_indices[i] for i in bad_inputs) else: for i,", "# immediately since client may be trying to DoS us", "expected_len = len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len:", "= msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client:", "connections, call .stop(). \"\"\" def __init__(self, bindhost, port=0, upnp =", "for t in client.tags: ts = self.tags[t] ts.pool += 1", "present in too many fuses. client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)]", "set this to startup time. 
self.reset_timer() def run(self): try: super().run()", "dest_commit_blob, all_components, bad_components, Params.component_feerate) except ValidationError as e: self.print_error(\"got bad", "True: msg, mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT)", "if missing_sigs else ')'}\") # mark all missing-signature components as", "was fine ({outpoint})\") # At this point we could blame", "exactly one of the inputs is signed, we don't punish", "we take until the result would exceed 100 kB standard", "covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who didn't manage to", "upnp = None, announcehost = None, donation_address = None): assert", "TagStatus: __slots__ = ('pool', 'all_') def __init__(self): self.pool = 0", "as a placeholder, set this to startup time. self.reset_timer() def", "return elif remtime != inftime: status.time_remaining = round(remtime) statuses[t] =", "except AttributeError: client.error('component submitted at wrong time') sort_key, contrib =", "result. # Note that we could have aborted earlier but", "= tx_from_components(all_components, session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True))))", "for c in chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update timing info", "this to startup time. 
self.reset_timer() def run(self): try: super().run() finally:", "collector: # an in-place sort by source commitment idx removes", "the various tags self.fill_threshold = fill_threshold # minimum number of", "covert signatures phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic()", "almost always be unique) \"\"\" with self.lock: time_best = None", "OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR", "commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments", "COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR", "cj in commitment_master_list if ci != myindex] N = len(possible_commitment_destinations)", "2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7,", "controls the Fusion rounds running from server side. \"\"\" def", "who didn't manage to give a good commitment. prev_client_count =", "he's the honest guy and all the other components were", "fuses. client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag in msg.tags:", "return # scan the commitment list and note where each", "self.lock: ret = self.signatures del self.signatures return ret def reset(self):", "share same tag on a given tier (if more try", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. \"\"\"", "self.lock: time_best = None tier_best = None size_best = 0", "info self.t_last_fuse = time.monotonic() self.reset_timer() # Uncomment the following to:", "imposters who didn't have private key. If more than one", "3.9, 4.7, 5.6, 6.8, 8.2] E24 = [1.0, 1.1, 1.2,", "long. start_time_max = 1200 # Inter-fusion delay -- after starting", "later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for client in", "owner calls start_signatures. 
- To signal the end of covert", "client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools = {t: self.waiting_pools[t] for", "however a server admin may run an SSL server proxy", "exc_type, exc_value, traceback): if exc_type is not None: self.fails.append(exc_value) if", "Do some preliminary checks to see whether we should just", "tier. \"\"\" with self.lock: chosen_clients = list(self.waiting_pools[tier].pool) # Notify that", "{prev_client_count - len(self.clients)})\") total_excess_fees = sum(f for _,_,f in results)", "inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders in prevout_spenders.items():", "for commit, ci, cj in commitment_master_list if ci != myindex]", "IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "TODO - make these configurable class Params: num_components = 23", "PrintError, ServerError, TimeoutException from . import fusion_pb2 as pb from", "if len(spenders) == 1: continue self.print_error(f\"multi-spend of f{prevout} detected\") #", "timeout=timeout) return submsg, mtype def send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse,", "62 elif min_safe_clients * num_components >= 0xfc: # the smallest", "t.maxsimul: break else: self._add_pool(client) moved.append(client) for client in moved: self.queue.remove(client)", "else: client.print_error(\"👀 No genesis hash declared by client, we'll let", "!= sum(component_contribs): skip_signatures = True self.print_error(\"problem detected: excess fee mismatch\")", "# The above copyright notice and this permission notice shall", "= msg.random_number_commitment for client in self.clients: client.addjob(client_start, collector) # Record", "session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components) #TODO : Check", "# Sleep until end of covert signatures phase remtime =", "harder for one of the players to # broadcast 
a", "1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9,", "try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError as e: nice_msg, = e.args", "power. if sig != existing_sig: if not schnorr.verify(pubkey, sig, sighash):", "\"\"\" def __init__(self, fill_threshold, tag_max): self.pool = set() # clients", "# For now, msg.genesis_hash is optional and we tolerate it", "KeyError: if self.stopping: return client.error(f\"Invalid tier selected: {t}\") try: mytiers", "signed, we don't punish him # because he's the honest", "len(msg.blames): client.error('multiple blames point to same proof') # Note, the", "long from first connection to last possible Tor component submission?", "AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "try: del self.sighashes del self.pubkeys except AttributeError: pass def new_client_job(self,", "to tags being full self.tags = defaultdict(TagStatus) # how are", "- tnow) if remtime <= 0: self.start_fuse(t) return elif remtime", "Sanity check for testing -- the proof sharing thing doesn't", "# an in-place sort by source commitment idx removes ordering", "with self.lock: ret = self.components del self.components return ret def", "with what we have. bad_components = set() ### if skip_signatures:", "Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More than one blame per proof", "del self.components del self.feerate except AttributeError: pass try: del self.sighashes", "out. component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp for comp, (sort_key, contrib)", "= pool.check_add(client) if res is not None: client.error(res) for t", "may have changed the favoured tier self.reset_timer() inftime = float('inf')", "might run for a while if many # checks against", "server_time = begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time)", "for that purpose. 
\"\"\" import secrets import sys import threading", "# How many clients do we want before starting a", "following conditions: # # The above copyright notice and this", "how many clients can share same tag (in pool and", "return \"too many clients with same tag\" def _add_pool(self, client):", "long covert connections are allowed to stay open without activity.", "OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH", "tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools = {t:", "129 bytes long. # generate the possible destinations list (all", "relays)): client.error(\"late proofs\") for client in self.clients: client.addjob(client_get_proofs, collector) results", "timeout = COVERT_CLIENT_TIMEOUT) if mtype == 'ping': continue if client.got_submit:", "a second submission before a new phase started. As #", "(you claimed: {blame.blame_reason!r})') continue if isinstance(ret, str): self.print_error(f\"verified a bad", "start message to players; record the time we did this", "Server for covert submissions. How it works: - Launch the", "client.error('signature submitted at wrong time') except IndexError: raise ValidationError('which_input too", "conditions: # # The above copyright notice and this permission", "missing signatures). - To reset the server for a new", "else: self.print_error(\"starting covert signature acceptance\") tx, input_indices = tx_from_components(all_components, session_hash)", "check start times. statuses = dict() tfill_thresh = tnow -", "('pool', 'all_') def __init__(self): self.pool = 0 self.all_ = 0", "testing -- the proof sharing thing doesn't even make sense", "uniform E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8] E12", "full). 
start_time_min = 400 # whether to print a lot", "self.run_round(covert_server): break self.print_error('Ended successfully!') except FusionError as e: self.print_error(f\"Ended with", "if client.got_submit: # We got a second submission before a", "= [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars", "pubkey). prevout_spenders = defaultdict(list) for i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']}", "[list() for _ in self.clients] for src_client, relays in results:", "break the connection as a result. # Note that we", "# Note, the rest of this function might run for", "will start. for c in chosen_clients: c.start_ev.set() # Remove those", "except AttributeError: pass try: del self.sighashes del self.pubkeys except AttributeError:", "client: {msg}') class CovertServer(GenericServer): \"\"\" Server for covert submissions. How", "components as bad. bad_inputs = set(i for i,sig in enumerate(signatures)", "isinstance(donation_address, (Address, type(None))) if not schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise", "relaying, before sharing the # signatures. This makes it slightly", "pool clients to trigger setting fill_time self.fill_time = None #", "# shuffle the adding order so that if filling more", "per connection # per phase. client.error('multiple submission in same phase')", "_ = self.components except AttributeError: client.error('component submitted at wrong time')", "(in pool and queue) def check_add(self, client): for t in", "# sats/kB max_excess_fee = 300000 # sats tiers = [round(b*s)", "THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY", "# note this needs to consider the maximum interval between", "a fusion? min_clients = 8 # If all clients submitted", "oregano.schnorr as schnorr from oregano.address import Address from oregano.util import", "Collect submissions from different sources, with a deadline. 
def __init__(self,", "blame per proof is malicious. Boot client # immediately since", "None else: self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing)", "that's OK. self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end", "3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5,", "# Clean up dead clients self.clients = [c for c", "timeout=3) except ServerError as e: nice_msg, = e.args server_msg =", "txsignatures = signatures)) return True self.sendall(pb.FusionResult(ok = False, bad_components =", "the adding order so that if filling more than one", "list(mytierpools) rng.shuffle(mytiers) # shuffle the adding order so that if", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0,", "class FusionServer(GenericServer): \"\"\"Server for clients waiting to start a fusion.", "None or ft < time_best or size > size_best: time_best", "# 'checking finished' so that if all blames are checked,", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "* num_components >= 2 * 0xfc: # the smallest fusion", "fuse: - Out of the pool(s) with the most number", "remove(self, client): # make sure to call try_move_from_queue() after calling", "pool moved = [] for client in self.queue: for t", "except AttributeError: client.error('signature submitted at wrong time') except IndexError: raise", "self.clients: client.addjob(client_start, collector) # Record the time that we sent", "varint for both inputs and outputs lists overhead = 62", "in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector = ResultsCollector(len(self.clients), done_on_fail =", "= set() ### if skip_signatures: self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components", "of this software and associated documentation files # (the 
\"Software\"),", "+ tagbytes return super().__new__(cls, b) @property def maxsimul(self): return self[0]", "2.2, 3.3, 4.7, 6.8] E12 = [1.0, 1.2, 1.5, 1.8,", "whom the Software is furnished to do so, # subject", "= client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More than", "= [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7,", "existing_sig: # We received a distinct valid signature. This is", "than one blame per proof is malicious. Boot client #", "this needs to consider the maximum interval between messages: #", "call start_components. - To signal the end of covert components", "round_pubkey for c in self.spawned_clients: c.got_submit = False def end_components(self):", "activity. # note this needs to consider the maximum interval", "max_tier_client_tags = 100 # For a given IP, how many", "by # making us check many inputs against blockchain. if", "originated. commitment_master_list = [(commit, ci, cj) for ci, (_, commitments,", "clients do we need as an absolute minimum (for privacy)?", "self.pool = 0 self.all_ = 0 class WaitingPool: \"\"\" a", "too high') sig = msg.txsignature if len(sig) != 64: raise", "ts.pool >= t.maxsimul: can_pool = False if can_pool: self._add_pool(client) else:", "lists overhead = 60 else: # the smallest fusion will", "({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\") src_client.kill('you provided a bad", "our CPU power. 
if sig != existing_sig: if not schnorr.verify(pubkey,", "True self.print_error(\"problem detected: too few components submitted\") if total_excess_fees !=", "in enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx =", "successful!\") # Give our transaction a small head start in", "= True for t in client.tags: ts = self.tags[t] ts.all_", "for t in client.tags: ts = self.tags[t] if ts.pool >=", "results) # Generate scrambled commitment list, but remember exactly where", "> 0, \"timings set up incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items())", "if they are e.g. on testnet # and we are", "that if all blames are checked, we # can start", "try: self.pool.remove(client) except KeyError: in_pool = False try: self.queue.remove(client) except", "len(all_components) != len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem detected: too few", "= feerate self.round_pubkey = round_pubkey for c in self.spawned_clients: c.got_submit", "record for later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for", "as clients are doing this. 
remtime = covert_T0 + Protocol.T_START_COMPS", "self.round_pubkey feerate = self.feerate _ = self.components except AttributeError: client.error('component", "!= client.random_number_commitment: client.error(\"seed did not match commitment\") proofs = msg.encrypted_proofs", "# # Permission is hereby granted, free of charge, to", "phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime", "try: self.queue.remove(client) except ValueError: return False else: in_pool = True", "subject to the following conditions: # # The above copyright", "don't start a fusion if it has only been above", "a gentler goodbye than killing if text is not None:", "covert_server.reset() # Clean up dead clients self.clients = [c for", "= upnp) self.config = config self.network = network self.announcehost =", "'' if tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools", "msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer): \"\"\" Server", "if len(msg.tags) > 5: client.error(\"Too many tags\") # Event for", "WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers} self.t_last_fuse = time.monotonic() #", "moved.append(client) for client in moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for", "start_time_spacing = 120 # But don't start a fusion if", "round_pubkey = covert_Cpub # start to accept covert components covert_server.start_components(round_pubkey,", "pool started. start_ev = threading.Event() client.start_ev = start_ev if client_ip.startswith('127.'):", "client.print_error(\"👀 No genesis hash declared by client, we'll let them", "a bit before uploading commitments, as clients are doing this.", "a given input's signature submission. 
raise ValidationError('conflicting valid signature') with", "at wrong time') else: assert mtype == 'signature' try: sighash", "if skip_signatures: self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures", "with the most number of players, - Choose the pool", "time.monotonic() self.reset_timer() # Uncomment the following to: Remove from spawned", "anti-spam measure we only allow one submission per connection #", "server admin may run an SSL server proxy such as", "It would be # appropriate to add some 'ban score'", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "# Remove those clients from all pools for t, pool", "ret = self.signatures del self.signatures return ret def reset(self): try:", "(if more try to join, reject) max_tier_client_tags = 100 #", "basic server implementation for CashFusion. Does not natively offer SSL", "6.8] E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3,", "in self.clients if not c.dead] if len(live) < Params.min_safe_clients: for", "scrambled commitment list, but remember exactly where each commitment originated.", "6.2, 6.8, 7.5, 8.2, 9.1] # TODO - make these", "remaining live players\") def run (self, ): self.print_error(f'Starting fusion with", "{covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier,", "def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class ClientTag(bytes):", "the possible destinations list (all commitments, but leaving out the", "the last fuse happened; as a placeholder, set this to", "clients leave ... How many clients do we need as", "a good connection to the EC server. Report this back", "feerate = self.feerate _ = self.components except AttributeError: client.error('component submitted", "is started, the ClientThreads are passed over to a FusionController", "at the selected tier. 
\"\"\" with self.lock: chosen_clients = list(self.waiting_pools[tier].pool)", "phase, owner calls start_signatures. - To signal the end of", "= list() # clients who are waiting due to tags", "ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate) except", "2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1,", "proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs, collector) _ =", "== 'signature' try: sighash = self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig", "= list(self.waiting_pools[tier].pool) # Notify that we will start. for c", "even restart / end # without sharing components. skip_signatures =", "since fusion server ought # to have a good connection", "inftime: status.time_remaining = round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses = statuses))", "bindhost, port, upnp = None, announcehost = None, donation_address =", "c.kill(reason) def run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub = gen_keypair() round_pubkey", "signature acceptance. 
{missing_sigs} missing :{'(' if missing_sigs else ')'}\") #", "will use 1-byte varint for both inputs and outputs lists", "pools, which may have changed the favoured tier self.reset_timer() inftime", "FusionError(f'Rejected client: {msg}') class ClientTag(bytes): \"\"\" enhanced bytes object to", "c in self.clients: c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)]", "if remtime <= 0: self.start_fuse(t) return elif remtime != inftime:", "e: self.print_error(f\"Ended with error: {e}\") except Exception as e: self.print_error('Failed", "error: {e}\") except Exception as e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr)", "remtime = deadline - time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock: ret", "msg.excess_fee)): c.error(\"late commitment\") # record for later c.blind_sig_requests = msg.blind_sig_requests", "sure to call try_move_from_queue() after calling this try: self.pool.remove(client) except", "for t in client.tags: ts = self.tags.get(t) if ts is", "we had plenty of time raise FusionError(\"way too slow\") time.sleep(remtime)", "= round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_start(c,", "kB standard tx size limitation? 
max_clients = (100000 - 12)", "self.pool.remove(client) except KeyError: in_pool = False try: self.queue.remove(client) except ValueError:", "len(pool.pool) if size >= size_best: if time_best is None or", "!= inftime: status.time_remaining = round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses =", "continue size = len(pool.pool) if size >= size_best: if time_best", "self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components))) ### self.print_error(f\"entering blame phase.", "connected client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection,", "# and to permit persons to whom the Software is", "relays = [] for i, proof in enumerate(proofs): dest_client_idx, dest_key_idx", "# way third parties can't abuse us to find out", "them, and they are put into the waiting pools. Once", "ts.pool -= 1 if ts.all_ == 0: # cleanup for", "num_inputs = len(sighashes) assert num_inputs == len(pubkeys) self.signatures = [None]*num_inputs", "COVERT_CLIENT_TIMEOUT = 40 # used for non-cryptographic purposes import random", "if len(self.pool) < self.fill_threshold: self.fill_time = None for t in", "s in signatures if s is None]) ### self.print_error(f\"ending covert", "fusion server ought # to have a good connection to", "Inter-fusion delay -- after starting any fusion, wait this long", "if not c.dead] self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended successfully!') except", "This is not # allowed and we break the connection", "scan the commitment list and note where each client's commitments", "3 # Guaranteed time to launch a fusion if the", "PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "This controls the Fusion rounds running from server side. 
\"\"\"", "than killing if text is not None: client.send_error(text) raise client.Disconnect", "client: {msg}') class ClientTag(bytes): \"\"\" enhanced bytes object to represent", "on testnet # and we are mainnet, etc. client.error(\"This server", "[[None]*Params.num_components for _ in self.clients] for i, (commit, ci, cj)", "self.done_ev.set() elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results: self.done_ev.set()", "ResultsCollector(len(self.clients), done_on_fail = False) def client_get_proofs(client, collector): with collector: msg", "used for non-cryptographic purposes import random rng = random.Random() rng.seed(secrets.token_bytes(32))", "contrib) in component_master_list] del component_master_list # Do some preliminary checks", "FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO", "if not (0 < tag.limit < 6): client.error(\"Tag limit out", "blame, or maybe even restart / end # without sharing", "if tag.no_ip else client_ip client.tags.append(ClientTag(ip, tag.id, tag.limit)) try: mytierpools =", "sort by source commitment idx removes ordering correlations about which", "it works: - Launch the server at any time. By", "server for a new round, call .reset(); to kill all", "high') sig = msg.txsignature if len(sig) != 64: raise ValidationError('signature", "Update timing info self.t_last_fuse = time.monotonic() self.reset_timer() # Uncomment the", "e.g. on testnet # and we are mainnet, etc. client.error(\"This", "in self.waiting_pools.items(): for c in chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update", "elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results: self.done_ev.set() def", "submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg, mtype", "client.addjob(client_start, collector) # Record the time that we sent 'startround'", "the next round's component submission? 
COVERT_CLIENT_TIMEOUT = 40 # used", "\" + txid) try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError as e:", "self.queue.append(client) return can_pool def remove(self, client): # make sure to", "submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self, msg): self.send(pb.Error(message", "port=0, upnp = None): super().__init__(bindhost, port, CovertClientThread, upnp = upnp)", "ts is not None and ts.all_ >= self.tag_max: return \"too", "< 6): client.error(\"Tag limit out of range\") ip = ''", "input: {reason}\") src_client.kill('you provided a bad input: ' + reason)", "\"\"\" Scan pools for the favoured fuse: - Out of", "the following to: Remove from spawned clients list, so that", "remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic() if remtime >", "num_components = 23 component_feerate = 1000 # sats/kB max_excess_fee =", "Out of the pool(s) with the most number of players,", "can report back to the # verifier, there is no", "ValidationError('which_input too high') sig = msg.txsignature if len(sig) != 64:", "s is None]) ### self.print_error(f\"ending covert signature acceptance. {missing_sigs} missing", "= threading.Lock() self.results = [] self.fails = [] def __enter__(self,", "where each commitment originated. commitment_master_list = [(commit, ci, cj) for", "None) for i in spenders) != 1: bad_inputs.update(spenders) if bad_inputs:", "if existing_sig: # We received a distinct valid signature. This", "and they are put into the waiting pools. Once a", "src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret =", "recv(self, *expected_msg_names, timeout=None): submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout)", "SOFTWARE. \"\"\" A basic server implementation for CashFusion. 
Does not", "total_excess_fees = sum(f for _,_,f in results) # Generate scrambled", "and we tolerate it # missing. However, if the client", "covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime < 0: #", "phase, call start_components. - To signal the end of covert", "merge, # publish, distribute, sublicense, and/or sell copies of the", "Params.tiers, donation_address = donation_address )) # We allow a long", "None) self.clients = [] # gc def kick_missing_clients(self, goodclients, reason", "submission per connection # per phase. client.error('multiple submission in same", "for ci, (_, commitments, _) in enumerate(results) for cj,commit in", "if msg.version != Protocol.VERSION: client.error(\"Mismatched protocol version, please upgrade\") if", "(unless hit max time or pool is full). start_time_spacing =", "good commitment. prev_client_count = len(self.clients) self.clients = [c for c,", "else: self.print_error(\"broadcast was successful!\") # Give our transaction a small", "sharing components. skip_signatures = False if len(all_components) != len(self.clients)*Params.num_components: skip_signatures", "many clients can share same tag on a given tier", "self.clients] for src_client, relays in results: for proof, src_commitment_idx, dest_client_idx,", "existing_sig = self.signatures[msg.which_input] except AttributeError: client.error('signature submitted at wrong time')", "are allowed to stay open without activity. # note this", "len(self.results) >= self.num_results: self.done_ev.set() return True class FusionController(threading.Thread, PrintError): \"\"\"", "ipb = ipstr.encode() b = bytes([maxsimul, len(ipb)]) + ipb +", "a non-favoured pool will start eventually remtime = pool.fill_time -", "but it was fine ({outpoint})\") # At this point we", "* 173) # Every round, clients leave ... How many", "leaving out the originating client's commitments). 
myindex = self.clients.index(client) possible_commitment_destinations", "= [list() for _ in self.clients] for src_client, relays in", "not None: # a non-favoured pool will start eventually remtime", "for x,y,z, _ in proofs])) msg = client.recv('blames', timeout =", "= threading.Lock() seen_salthashes = set() # Send start message to", "self.print_error(f\"player indicated bad input but it was fine ({outpoint})\") #", "next round's component submission? COVERT_CLIENT_TIMEOUT = 40 # used for", "to represent a pool tag \"\"\" __slots__ = () def", "c.addjob(clientjob_goodbye, None) self.clients = [] # gc def kick_missing_clients(self, goodclients,", "if any(len(p) > 200 for p in proofs): client.error(\"too-long proof\")", "owner calls end_components, which returns a dict of {component: contrib},", "ValidationError('bad transaction signature') if existing_sig: # We received a distinct", "acceptance. {missing_sigs} missing :{'(' if missing_sigs else ')'}\") # mark", "= e.args server_msg = e.server_msg self.print_error(f\"could not broadcast the transaction!", "N, i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if", "access client.tags = [] else: # Default tag: this IP", "How it works: - Launch the server at any time.", "start times. 
statuses = dict() tfill_thresh = tnow - Params.start_time_max", "sig = msg.txsignature if len(sig) != 64: raise ValidationError('signature length", "bindhost, port=0, upnp = None): super().__init__(bindhost, port, CovertClientThread, upnp =", "server error') finally: covert_server.stop() for c in self.clients: c.addjob(clientjob_goodbye, None)", "-- round numbers that are almost geometrically uniform E6 =", "possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx))", "don't have bias towards any particular tier with self.lock: if", "blames are checked, we # can start next round right", "/ end # without sharing components. skip_signatures = False if", "a deadline. def __init__(self, num_results, done_on_fail = True): self.num_results =", "a bad input: ' + reason) continue except Exception as", "times. statuses = dict() tfill_thresh = tnow - Params.start_time_max for", "measure we only allow one submission per connection # per", "= bool(done_on_fail) self.done_ev = threading.Event() self.lock = threading.Lock() self.results =", "= commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret = validate_blame(blame, encproof,", "tier self.queue = list() # clients who are waiting due", "= fill_threshold # minimum number of pool clients to trigger", "tnow = time.monotonic() # scan through tiers and collect statuses,", "not be best anymore. self.reset_timer() raise class ResultsCollector: # Collect", "best anymore. 
self.reset_timer() raise class ResultsCollector: # Collect submissions from", "notice and this permission notice shall be # included in", "if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames): client.error('multiple blames", "Params.component_feerate) except ValidationError as e: self.print_error(\"got bad blame; clamed reason", "then process results results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS)", "enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit for commit,ci,cj in commitment_master_list) #", "skip_signatures = True self.print_error(\"problem detected: excess fee mismatch\") self.last_hash =", "def client_get_blames(client, myindex, proofs, collector): with collector: # an in-place", "120 # But don't start a fusion if it has", "== self.fill_threshold: self.fill_time = time.monotonic() def add(self, client): can_pool =", "Params: num_components = 23 component_feerate = 1000 # sats/kB max_excess_fee", "1 if in_pool: ts.pool -= 1 if ts.all_ == 0:", "also check start times. statuses = dict() tfill_thresh = tnow", "if self.stopping: return client.error(f\"Invalid tier selected: {t}\") try: mytiers =", "a fusion. New clients get a ClientThread made for them,", "Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self,", "(num_components * 173) # Every round, clients leave ... How", "if size >= size_best: if time_best is None or ft", "whitelisted to allow unlimited access client.tags = [] else: #", "{ret}') continue if src_client.dead: # If the blamed client is", "3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2,", "self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected", "Upload the full commitment list; we're a bit generous with", "blockchain checks are somewhat subjective. 
It would be # appropriate", "{msg}') class CovertServer(GenericServer): \"\"\" Server for covert submissions. How it", "def check_add(self, client): for t in client.tags: ts = self.tags.get(t)", "if not c.dead] if len(live) < Params.min_safe_clients: for c in", "upnp self.announcehost = announcehost self.daemon = True def sendall(self, msg,", "messages then process results results = collector.gather(deadline = covert_T0 +", "= round(remtime) statuses[t] = status client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except:", "component_master_list] component_contribs = [contrib for comp, (sort_key, contrib) in component_master_list]", "# a gentler goodbye than killing if text is not", "to broadcast transaction! misconfigured?\") # This probably indicates misconfiguration since", "against blockchain need to be done, perhaps even still #", "= False if len(all_components) != len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem", "msg.encrypted_proofs if len(proofs) != Params.num_components: client.error(\"wrong number of proofs\") if", "200 for p in proofs): client.error(\"too-long proof\") # they should", "we have. 
bad_components = set() ### if skip_signatures: self.print_error(\"skipping covert", "min_clients = 8 # If all clients submitted largest possible", "not match commitment\") proofs = msg.encrypted_proofs if len(proofs) != Params.num_components:", "sublicense, and/or sell copies of the Software, # and to", "# obtaining a copy of this software and associated documentation", "c.start_ev.set() # Remove those clients from all pools for t,", "None: # a non-favoured pool will start eventually remtime =", "self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class ClientTag(bytes): \"\"\" enhanced bytes", "tag in msg.tags: if len(tag.id) > 20: client.error(\"Tag id too", "with self.lock: if self.stopping: return # add this client to", "def client_get_proofs(client, collector): with collector: msg = client.recv('myproofslist') seed =", "continue src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try:", "self.signatures = [None]*num_inputs self.sighashes = sighashes self.pubkeys = pubkeys for", "### if skip_signatures: self.print_error(\"skipping covert signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components,", "= 0 for t, pool in self.waiting_pools.items(): ft = pool.fill_time", "This probably indicates misconfiguration since fusion server ought # to", "f{prevout} detected\") # If exactly one of the inputs is", "* num_components >= 0xfc: # the smallest fusion could require", "in client.tags: ts = self.tags[t] ts.all_ -= 1 if in_pool:", "for t in msg.tiers} except KeyError: if self.stopping: return client.error(f\"Invalid", "from first connection to last possible Tor component submission? #", "relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if not collector.add((client, relays)): client.error(\"late proofs\")", "#!/usr/bin/env python3 # # Oregano - a lightweight Ergon client", "acceptance. 
{len(component_master_list)} received.\") # Sort the components & contribs list,", "immediately self.start_fuse(t) return # we have added to pools, which", "try to # not reference self.<variables> that may change. for", "to allow unlimited access client.tags = [] else: # Default", "too slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs = len([s for", "the smallest fusion could require 3-byte varint for either inputs", "third parties can't abuse us to find out the #", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "substantial portions of the Software. # # THE SOFTWARE IS", "{t}\") try: mytiers = list(mytierpools) rng.shuffle(mytiers) # shuffle the adding", "= len(pool.pool), min_players = Params.min_clients) remtime = inftime if pool.fill_time", "ts.pool >= t.maxsimul: break else: self._add_pool(client) moved.append(client) for client in", "covert components covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind nonces (slow!) for", "reset_timer(self, ): \"\"\" Scan pools for the favoured fuse: -", "<= 0: self.start_fuse(t) return elif remtime != inftime: status.time_remaining =", "start a fusion if it has only been above min_clients", "dead, don't waste more time. 
# Since nothing after this", "[] else: # Default tag: this IP cannot be present", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "# whether to print a lot of logs noisy =", "because he's the honest guy and all the other components", "kick_missing_clients(self, goodclients, reason = None): baddies = set(self.clients).difference(goodclients) for c", "i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx)) if not", "these configurable class Params: num_components = 23 component_feerate = 1000", "donation_address = donation_address )) # We allow a long timeout", "AttributeError: client.error('signature submitted at wrong time') except IndexError: raise ValidationError('which_input", "seed = msg.random_number if sha256(seed) != client.random_number_commitment: client.error(\"seed did not", "if len(live) < Params.min_safe_clients: for c in live: c.kill(\"too few", "in commitment_master_list) # Send blind signatures for c in self.clients:", "txid = tx.txid() self.print_error(\"completed the transaction! \" + txid) try:", "IP cannot be present in too many fuses. client.tags =", "etc. client.error(\"This server is on a different chain, please switch", "super().__init__(name=\"FusionController\") self.network = network self.tier = tier self.clients = list(clients)", "# add this client to waiting pools for pool in", "timeout = Protocol.STANDARD_TIMEOUT): for client in self.clients: client.addjob(clientjob_send, msg, timeout)", "favoured fuse. 
(since fill time is a float, this will", "bad_components, Params.component_feerate) except ValidationError as e: self.print_error(\"got bad blame; clamed", "mismatch\") self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components)", "maxsimul(self): return self[0] class TagStatus: __slots__ = ('pool', 'all_') def", "remtime = min(remtime, self.tier_best_starttime - tnow) if remtime <= 0:", "all_components) #TODO : Check the inputs and outputs to see", "= [None]*num_inputs self.sighashes = sighashes self.pubkeys = pubkeys for c", "might not be best anymore. self.reset_timer() raise class ResultsCollector: #", "msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client, text): #", "client.tags: ts = self.tags[t] ts.all_ += 1 if ts.pool >=", "raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp = upnp)", "mytiers = list(mytierpools) rng.shuffle(mytiers) # shuffle the adding order so", "process results results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) #", "add this client to waiting pools for pool in mytierpools.values():", "originating client's commitments). myindex = self.clients.index(client) possible_commitment_destinations = [(ci,cj) for", "server. # self.spawned_clients.difference_update(chosen_clients) # Kick off the fusion. rng.shuffle(chosen_clients) fusion", "= threading.Event() self.lock = threading.Lock() self.results = [] self.fails =", "components phase, owner calls end_components, which returns a dict of", "connections are allowed to stay open without activity. # note", "in results) # Generate scrambled commitment list, but remember exactly", "commitments, but leaving out the originating client's commitments). myindex =", "Now, repackage the proofs according to destination. proofs_to_relay = [list()", "tx size limitation? 
max_clients = (100000 - 12) // (num_components", "Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components, component_feerate =", "baddies = set(self.clients).difference(goodclients) for c in baddies: c.kill(reason) def run_round(self,", "1-byte varint for both inputs and outputs lists overhead =", "self.results = [] self.fails = [] def __enter__(self, ): return", "one # signed, then it's malicious behaviour! if sum((signatures[i] is", "and to permit persons to whom the Software is furnished", "the prevout and claimed pubkey). prevout_spenders = defaultdict(list) for i,", "assert isinstance(donation_address, (Address, type(None))) if not schnorr.has_fast_sign() or not schnorr.has_fast_verify():", "more than one # signed, then it's malicious behaviour! if", "guy and all the other components were # just imposters", "sense with one player. for c in self.clients: c.kill('blame yourself!')", "of covert signatures phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES -", "False, server_time = begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False,", ")) msg = c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components)", "b'', Params.ip_max_simul_fuse)] for tag in msg.tags: if len(tag.id) > 20:", "= '' if isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components", "self.clients = [] # gc def kick_missing_clients(self, goodclients, reason =", "phase. client.error('multiple submission in same phase') if mtype == 'component':", "Fusion at the selected tier. 
\"\"\" with self.lock: chosen_clients =", "if can_pool: self._add_pool(client) else: self.queue.append(client) return can_pool def remove(self, client):", "ts = self.tags[t] if ts.pool >= t.maxsimul: break else: self._add_pool(client)", "self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}') covert_server = CovertServer(self.bindhost,", "for i in bad_inputs) else: for i, (inp, sig) in", "it slightly harder for one of the players to #", "self.tags[t] ts.pool += 1 if len(self.pool) == self.fill_threshold: self.fill_time =", "min_clients for a short time (unless pool is full). start_time_min", "!= len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem detected: too few components", "represent in the same fuse? ip_max_simul_fuse = 3 # Guaranteed", "self.lock: ret = self.results del self.results return ret def add(self,", "started. start_ev = threading.Event() client.start_ev = start_ev if client_ip.startswith('127.'): #", "self.spawned_clients.difference_update(chosen_clients) # Kick off the fusion. rng.shuffle(chosen_clients) fusion = FusionController(self.", "blockchain need to be done, perhaps even still # running", "round, call .reset(); to kill all connections, call .stop(). 
\"\"\"", "'ping', timeout = COVERT_CLIENT_TIMEOUT) if mtype == 'ping': continue if", "skip_signatures = False if len(all_components) != len(self.clients)*Params.num_components: skip_signatures = True", "except KeyError: if self.stopping: return client.error(f\"Invalid tier selected: {t}\") try:", "purposes import random rng = random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg,", "favoured fuse: - Out of the pool(s) with the most", "version, please upgrade\") if msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash(): #", "client): # make sure to call try_move_from_queue() after calling this", "is not None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif len(self.fails) +", "charge, to any person # obtaining a copy of this", "= [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag in msg.tags: if len(tag.id)", "with self.lock: for t, pool in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue()", "However, if the client declares the genesis_hash, we # do", "mytiers: pool = mytierpools[t] pool.add(client) if len(pool.pool) >= Params.max_clients: #", "except Exception as e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for c", "implementation for CashFusion. Does not natively offer SSL support, however", "full self.tags = defaultdict(TagStatus) # how are the various tags", "blamed client is already dead, don't waste more time. #", "Remove from spawned clients list, so that the fusion can", "even still # running after run_round has exited. 
For this", "more try to join, reject) max_tier_client_tags = 100 # For", "size_best: if time_best is None or ft < time_best or", "see whether we should just skip the # signing phase", "components were # just imposters who didn't have private key.", "clients can share same tag on a given tier (if", "mtype == 'signature' try: sighash = self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input]", "size_best = size if time_best is None: self.tier_best_starttime = None", "(for {src_commitment_idx}): {ret}') continue if src_client.dead: # If the blamed", "self.start_fuse(t) return elif remtime != inftime: status.time_remaining = round(remtime) statuses[t]", "list(clients) self.bindhost = bindhost self.upnp = upnp self.announcehost = announcehost", "assert tx.is_complete() txid = tx.txid() self.print_error(\"completed the transaction! \" +", "start of covert signatures phase, owner calls start_signatures. - To", "annhost.encode('ascii') annport = covert_server.port covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert server", "transaction signature') if existing_sig: # We received a distinct valid", "the next one (unless hit max time or pool is", "# start to accept covert components covert_server.start_components(round_pubkey, Params.component_feerate) # generate", "that we will start. for c in chosen_clients: c.start_ev.set() #", "a pool tag \"\"\" __slots__ = () def __new__(cls, ipstr,", "in E12] # How many clients do we want before", "pools on failure (on success, we are already removed; on", "it to consume our CPU power. if sig != existing_sig:", "if self.done_on_fail: self.done_ev.set() elif len(self.fails) + len(getattr(self, 'results', ())) >=", "players, - Choose the pool with the earliest fill time;", "for client in self.clients: client.addjob(client_start, collector) # Record the time", ".stop(). 
\"\"\" def __init__(self, bindhost, port=0, upnp = None): super().__init__(bindhost,", "client.tags: ts = self.tags[t] if ts.pool >= t.maxsimul: break else:", "msg): self.send(pb.Error(message = msg), timeout=5) def error(self, msg): self.send_error(msg) raise", "seen_salthashes = set() # Send start message to players; record", "be we already have this signature. This is fine #", "False) def client_get_proofs(client, collector): with collector: msg = client.recv('myproofslist') seed", "num_results, done_on_fail = True): self.num_results = int(num_results) self.done_on_fail = bool(done_on_fail)", "BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS", "the ClientThreads are passed over to a FusionController to run", "in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del c.blinds, c.blind_sig_requests", "def end_signatures(self): with self.lock: ret = self.signatures del self.signatures return", "0x41, use_cache = True)))) for i in range(len(tx.inputs()))] pubkeys =", "False while True: msg, mtype = client.recv('component', 'signature', 'ping', timeout", "= (sort_key, contrib) except AttributeError: client.error('component submitted at wrong time')", "admin may run an SSL server proxy such as nginx", "declared by client, we'll let them slide...\") if self.stopping: return", "so that if filling more than one pool, we don't", "is not None and ts.all_ >= self.tag_max: return \"too many", "# how are the various tags self.fill_threshold = fill_threshold #", "c.blind_sig_requests del results, collector # Sleep a bit before uploading", "end of covert signatures phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES", "_) in enumerate(results) for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments =", "EC server. 
Report this back to clients # as an", "which returns a list of signatures (which will have None", "do indeed disallow them connecting if they are e.g. on", "pool in self.waiting_pools.items(): ft = pool.fill_time if ft is None:", "CashFusion - an advanced coin anonymizer # # Copyright (C)", "self.components except AttributeError: client.error('component submitted at wrong time') sort_key, contrib", "skip_signatures = True)) else: self.print_error(\"starting covert signature acceptance\") tx, input_indices", "= time.monotonic() # scan through tiers and collect statuses, also", "upnp = None): super().__init__(bindhost, port, CovertClientThread, upnp = upnp) self.round_pubkey", "text): # a gentler goodbye than killing if text is", "def remove(self, client): # make sure to call try_move_from_queue() after", "Params.max_clients: # pool filled up to the maximum size, so", "self.print_error(f\"multi-spend of f{prevout} detected\") # If exactly one of the", "trying to broadcast transaction! misconfigured?\") # This probably indicates misconfiguration", "dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients =", "the timeout but that's OK. self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) #", "feerate): self.components = dict() self.feerate = feerate self.round_pubkey = round_pubkey", "ft tier_best = t size_best = size if time_best is", "send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def send_error(self, msg):", "sell copies of the Software, # and to permit persons", "commitment originated. 
commitment_master_list = [(commit, ci, cj) for ci, (_,", "in mytierpools.values(): res = pool.check_add(client) if res is not None:", "del self.round_pubkey del self.components del self.feerate except AttributeError: pass try:", "require 3-byte varint for both inputs and outputs lists overhead", "if exc_type is not None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif", "= self.signatures del self.signatures return ret def reset(self): try: del", "clean def reset_timer(self, ): \"\"\" Scan pools for the favoured", "use 1-byte varint for both inputs and outputs lists overhead", "comp, (sort_key, contrib) in component_master_list] del component_master_list # Do some", "component (uncompressed p2pkh input), how many could we take until", "a special time remtime = min(remtime, self.tier_best_starttime - tnow) if", "successful or exception while True: covert_server.reset() # Clean up dead", "if s is None]) ### self.print_error(f\"ending covert signature acceptance. {missing_sigs}", "self.tags[t] if ts.pool >= t.maxsimul: break else: self._add_pool(client) moved.append(client) for", "for t, pool in mytierpools.items(): if client not in pool.pool:", "= round_pubkey for c in self.spawned_clients: c.got_submit = False def", "!= get_current_genesis_hash(): # For now, msg.genesis_hash is optional and we", "validate_blame, ValidationError, check_input_electrumx) # Resistor \"E series\" values -- round", "{e} (you claimed: {blame.blame_reason!r})') continue if isinstance(ret, str): self.print_error(f\"verified a", "being full self.tags = defaultdict(TagStatus) # how are the various", "placeholder, set this to startup time. 
self.reset_timer() def run(self): try:", "client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if msg.version != Protocol.VERSION: client.error(\"Mismatched protocol", "list; we're a bit generous with the timeout but that's", "a specific tier \"\"\" def __init__(self, fill_threshold, tag_max): self.pool =", "as client did. relays = [] for i, proof in", "checking failed with exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player indicated bad", "is furnished to do so, # subject to the following", "p2pkh input), how many could we take until the result", "in proofs): client.error(\"too-long proof\") # they should only be 129", "too long\") if not (0 < tag.limit < 6): client.error(\"Tag", "timeout=5) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=5) def error(self,", "# Resistor \"E series\" values -- round numbers that are", "(overhead + min_safe_clients - 1) // min_safe_clients # How many", "# make sure to call try_move_from_queue() after calling this try:", "if msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash(): # For now, msg.genesis_hash", "put into fusion round if started at this tier self.queue", "various tags self.fill_threshold = fill_threshold # minimum number of pool", "msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class ClientTag(bytes): \"\"\" enhanced", "None: continue size = len(pool.pool) if size >= size_best: if", "recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout)", "are mainnet, etc. client.error(\"This server is on a different chain,", "in all copies or substantial portions of the Software. #", "blind nonces (slow!) for c in self.clients: c.blinds = [schnorr.BlindSigner()", "it # missing. 
However, if the client declares the genesis_hash,", "msg = client.recv('myproofslist') seed = msg.random_number if sha256(seed) != client.random_number_commitment:", "TimeoutException: self.print_error(\"timed out while trying to broadcast transaction! misconfigured?\") #", "False) def client_get_blames(client, myindex, proofs, collector): with collector: # an", "indicated bad input but it was fine ({outpoint})\") # At", "malleated version by re-signing one of their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok", "self.clients.index(client) possible_commitment_destinations = [(ci,cj) for commit, ci, cj in commitment_master_list", "commitment list, but remember exactly where each commitment originated. commitment_master_list", "(on success, we are already removed; on stop we don't", "to give a good commitment. prev_client_count = len(self.clients) self.clients =", "proof index {blame.which_proof} / {len(proofs)}') continue src_commit_blob, src_commit_client_idx, _ =", "component_master_list.sort(key=lambda x:x[1][0]) all_components = [comp for comp, (sort_key, contrib) in", "to launch a fusion if the pool has stayed at", "client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection, pb.ClientMessage,", "covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing", "can they represent in the same fuse? 
ip_max_simul_fuse = 3", "if min_safe_clients * num_components >= 2 * 0xfc: # the", "*expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return", "many blames') if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames):", "signal the end of covert components phase, owner calls end_components,", "= 3 # Guaranteed time to launch a fusion if", "txid) try: self.network.broadcast_transaction2(tx, timeout=3) except ServerError as e: nice_msg, =", "is a float, this will almost always be unique) \"\"\"", "tnow) if remtime <= 0: self.start_fuse(t) return elif remtime !=", "sighashes, pubkeys): num_inputs = len(sighashes) assert num_inputs == len(pubkeys) self.signatures", "def check_client_count(self,): live = [c for c in self.clients if", ")) # We allow a long timeout for clients to", "as e: self.print_error(\"got bad blame; clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad", "import threading import time import traceback from collections import defaultdict", "aborted earlier but this # way third parties can't abuse", "there is no privacy leak by the ommission. continue assert", "+ reason) continue except Exception as e: self.print_error(f\"player indicated bad", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "start of covert components phase, call start_components. - To signal", "None tier_best = None size_best = 0 for t, pool", "this. remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic() if remtime", "p in proofs): client.error(\"too-long proof\") # they should only be", "t.maxsimul: can_pool = False if can_pool: self._add_pool(client) else: self.queue.append(client) return", "so it might not be best anymore. self.reset_timer() raise class", "self.num_results: self.done_ev.set() def gather(self, *, deadline): remtime = deadline -", "add some 'ban score' to the player. 
# we aren't", "component commitment') if not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") #", "rounds running from server side. \"\"\" def __init__(self, network, tier,", "connecting if they are e.g. on testnet # and we", "f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\") src_client.kill('you provided a", "# If exactly one of the inputs is signed, we", "Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes = set(m.salted_component_hash for m in commit_messages)", "1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2] E24", "even make sense with one player. for c in self.clients:", "new_client_job(self, client): client_ip = client.connection.socket.getpeername()[0] msg = client.recv('clienthello') if msg.version", "del c.blinds, c.blind_sig_requests del results, collector # Sleep a bit", "self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers} self.t_last_fuse", "= covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime < 0:", "restart / end # without sharing components. skip_signatures = False", "= [[None]*Params.num_components for _ in self.clients] for i, (commit, ci,", "(sort_key, contrib) except AttributeError: client.error('component submitted at wrong time') else:", "starting the next one (unless hit max time or pool", "setting fill_time self.fill_time = None # when did pool exceed", "requires libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp = upnp) self.config =", "# making us check many inputs against blockchain. if len(msg.blames)", "this point can report back to the # verifier, there", "- fee). 
- Before start of covert signatures phase, owner", "as e: self.print_error('Failed with exception!') traceback.print_exc(file=sys.stderr) for c in self.clients:", "len(msg.tiers) == 0: client.error(\"No tiers\") if len(msg.tags) > 5: client.error(\"Too", "different chain, please switch servers\") else: client.print_error(\"👀 No genesis hash", "False else: in_pool = True if len(self.pool) < self.fill_threshold: self.fill_time", "start immediately self.start_fuse(t) return # we have added to pools,", "= covert_server.port covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert server started @", "# used for non-cryptographic purposes import random rng = random.Random()", "ret) except ValidationError as e: reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}]", "recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT):", "time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock: ret = self.results del self.results", "client.got_submit: # We got a second submission before a new", "proof (for {src_commitment_idx}): {ret}') continue if src_client.dead: # If the", "the time we did this round_time = round(time.time()) collector =", "\"\"\" import secrets import sys import threading import time import", "timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end of covert components phase remtime", "particular tier with self.lock: if self.stopping: return # add this", "in self.clients: client.addjob(clientjob_send, msg, timeout) def check_client_count(self,): live = [c", "4.7, 6.8] E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7,", "a fusion if the pool has stayed at or above", "remtime = inftime if pool.fill_time is not None: # a", "shuffle the adding order so that if filling more than", "ResultsCollector(len(self.clients), done_on_fail = False) def client_start(c, collector): with collector: 
c.send(pb.StartRound(round_pubkey", "= None, donation_address = None): assert network assert isinstance(donation_address, (Address,", "[comp for comp, (sort_key, contrib) in component_master_list] component_contribs = [contrib", "client in moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server for clients waiting", "or outputs lists overhead = 60 else: # the smallest", "time that we sent 'startround' message to players; this #", "in signatures if s is None]) ### self.print_error(f\"ending covert signature", "= lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for", "varint for both inputs and outputs lists overhead = 58", "It might be we already have this signature. This is", "IndexError: raise ValidationError('which_input too high') sig = msg.txsignature if len(sig)", "in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex() + '41' assert tx.is_complete()", "given input's signature submission. raise ValidationError('conflicting valid signature') with self.lock:", "starting any fusion, wait this long before starting the next", "= [comp for comp, (sort_key, contrib) in component_master_list] component_contribs =", "### self.print_error(f\"entering blame phase. bad components: {bad_components}\") if len(self.clients) <", "(these overhead numbers assume op_return script size of 1 +", "list, so that the fusion can continue independently of waiting", "+ ':' + str(ret.prev_index) try: check_input_electrumx(self.network, ret) except ValidationError as", "= Params.tiers, donation_address = donation_address )) # We allow a", "True)))) for i in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp", "for one of the players to # broadcast a malleated", "of this function might run for a while if many", "elif remtime != inftime: status.time_remaining = round(remtime) statuses[t] = status", "side. 
\"\"\" def __init__(self, network, tier, clients, bindhost, upnp =", "to any person # obtaining a copy of this software", "move clients from queue into pool moved = [] for", "live = [c for c in self.clients if not c.dead]", "num_inputs == len(pubkeys) self.signatures = [None]*num_inputs self.sighashes = sighashes self.pubkeys", "config, network, bindhost, port, upnp = None, announcehost = None,", "of their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures = signatures))", "proofs\") for client in self.clients: client.addjob(client_get_proofs, collector) results = collector.gather(deadline", "msg, mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT) if", "up incorrectly\" time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance.", "in self.waiting_pools.items(): ft = pool.fill_time if ft is None: continue", "self.upnp) try: annhost = covert_server.host if self.announcehost is None else", "self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\") src_client.kill('you provided a bad input:", "# because he's the honest guy and all the other", "make sure to call try_move_from_queue() after calling this try: self.pool.remove(client)", "pools. Once a Fusion thread is started, the ClientThreads are", "To signal the end of covert components phase, owner calls", "eventually remtime = pool.fill_time - tfill_thresh if t == self.tier_best:", "\"\"\" Immediately launch Fusion at the selected tier. \"\"\" with", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF", "given IP, how many players can they represent in the", "in self.spawned_clients: c.got_submit = False def end_components(self): with self.lock: ret", "player. 
for c in self.clients: c.kill('blame yourself!') return # scan", "hash declared by client, we'll let them slide...\") if self.stopping:", "For this reason we try to # not reference self.<variables>", "this # way third parties can't abuse us to find", "Params.noisy covert_server.start() self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})')", "point to same proof') # Note, the rest of this", "== 0: client.error(\"No tiers\") if len(msg.tags) > 5: client.error(\"Too many", "1000 # sats/kB max_excess_fee = 300000 # sats tiers =", "c.blinds], server_time = round_time )) msg = c.recv('playercommit') commit_messages =", "gc clean def reset_timer(self, ): \"\"\" Scan pools for the", "- time.monotonic() assert remtime > 0, \"timings set up incorrectly\"", "continue assert ret, 'expecting input component' outpoint = ret.prev_txid[::-1].hex() +", "= [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2,", ">= self.num_results: self.done_ev.set() def gather(self, *, deadline): remtime = deadline", "is already dead, don't waste more time. # Since nothing", "call .stop(). \"\"\" def __init__(self, bindhost, port=0, upnp = None):", "files # (the \"Software\"), to deal in the Software without", "dividing the overhead amongst players, in the smallest fusion #", "the proof sharing thing doesn't even make sense with one", "= Params.component_feerate, min_excess_fee = Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers =", "we # can start next round right away. 
collector.add(None) for", "c.error('duplicate component commitment') if not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\")", "src_client, relays in results: for proof, src_commitment_idx, dest_client_idx, dest_key_idx in", "else: for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] =", "new_client_job(self, client): client.got_submit = False while True: msg, mtype =", "if self.run_round(covert_server): break self.print_error('Ended successfully!') except FusionError as e: self.print_error(f\"Ended", "newhashes = set(m.salted_component_hash for m in commit_messages) with lock: expected_len", "Params.max_tier_client_tags) for t in Params.tiers} self.t_last_fuse = time.monotonic() # when", "it has only been above min_clients for a short time", "c in self.clients: c.kill('blame yourself!') return # scan the commitment", "for bad input: {reason}\") src_client.kill('you provided a bad input: '", "will form the basis of our covert timeline. covert_T0 =", "clients # as an 'internal server error'. raise else: self.print_error(\"broadcast", "round's component submission? COVERT_CLIENT_TIMEOUT = 40 # used for non-cryptographic", "end of covert components phase remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS", "Address from oregano.util import PrintError, ServerError, TimeoutException from . import", "tnow - Params.start_time_max for t, pool in mytierpools.items(): if client", "proofs): client.error(\"too-long proof\") # they should only be 129 bytes", "submission in same phase') if mtype == 'component': try: round_pubkey", "raise ValidationError('signature length is wrong') # It might be we", "timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class", "in self.clients: c.addjob(clientjob_goodbye, None) self.clients = [] # gc def", "next round right away. 
collector.add(None) for idx, (client, proofs) in", "we don't allow it to consume our CPU power. if", "into pool moved = [] for client in self.queue: for", "fill_threshold # minimum number of pool clients to trigger setting", "AttributeError: pass try: del self.sighashes del self.pubkeys except AttributeError: pass", "disallow them connecting if they are e.g. on testnet #", "stop we don't care.) with self.lock: for t, pool in", "len(self.clients) self.clients = [c for c, _, _ in results]", "of time raise FusionError(\"way too slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures())", "numbers assume op_return script size of 1 + 5 (lokad)", "Params.max_excess_fee, tiers = Params.tiers, donation_address = donation_address )) # We", "for signalling us that a pool started. start_ev = threading.Event()", "for later c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for client", "ci, (_, commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)]", "self.lock: try: self.signatures[msg.which_input] = sig except AttributeError: client.error('signature submitted at", "class TagStatus: __slots__ = ('pool', 'all_') def __init__(self): self.pool =", "Permission is hereby granted, free of charge, to any person", "start. for c in chosen_clients: c.start_ev.set() # Remove those clients", "it might not be best anymore. self.reset_timer() raise class ResultsCollector:", "\"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})') continue if", "before starting a fusion? 
min_clients = 8 # If all", "- an advanced coin anonymizer # # Copyright (C) 2020", "if len(tag.id) > 20: client.error(\"Tag id too long\") if not", "send_error(self, msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg)", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "needs to consider the maximum interval between messages: # -", "== 'ping': continue if client.got_submit: # We got a second", "# Send start message to players; record the time we", "self.clients if not c.dead] if len(live) < Params.min_safe_clients: for c", "lists overhead = 58 min_excess_fee = (overhead + min_safe_clients -", "None): super().__init__(bindhost, port, CovertClientThread, upnp = upnp) self.round_pubkey = None", "If no pools are filled then there is no favoured", "with self.lock: ret = self.signatures del self.signatures return ret def", "Send blind signatures for c in self.clients: scalars = [b.sign(covert_priv,", "LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR", "search for duplicated inputs (through matching the prevout and claimed", "CashFusion. Does not natively offer SSL support, however a server", "(commit, ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] = i collector =", "sig.hex() + '41' assert tx.is_complete() txid = tx.txid() self.print_error(\"completed the", "if len(msg.blames) > len(proofs): client.error('too many blames') if len(set(blame.which_proof for", "blames') if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames): client.error('multiple", "ValueError: return False else: in_pool = True if len(self.pool) <", "missing. 
However, if the client declares the genesis_hash, we #", "scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send,", "for blame in msg.blames)) != len(msg.blames): client.error('multiple blames point to", "= recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg, mtype def send(self,", "# (the \"Software\"), to deal in the Software without restriction,", "HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected client.\"\"\" def recv(self,", "as e: reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input:", "accept covert components covert_server.start_components(round_pubkey, Params.component_feerate) # generate blind nonces (slow!)", "1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3,", "all_components = [comp for comp, (sort_key, contrib) in component_master_list] component_contribs", "run (self, ): self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}')", "inputs and outputs to see if we even have reasonable", "# pool filled up to the maximum size, so start", "and collect statuses, also check start times. statuses = dict()", "received.\") # Sort the components & contribs list, then separate", "if text is not None: client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread):", "= True, txsignatures = signatures)) return True self.sendall(pb.FusionResult(ok = False,", "self.stopping or start_ev.is_set(): return tnow = time.monotonic() # scan through", "# self.spawned_clients.difference_update(chosen_clients) # Kick off the fusion. 
rng.shuffle(chosen_clients) fusion =", "id too long\") if not (0 < tag.limit < 6):", "limitation the rights to use, copy, modify, merge, # publish,", "len(seen_salthashes) + len(newhashes) seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len: c.error('duplicate component", "@property def maxsimul(self): return self[0] class TagStatus: __slots__ = ('pool',", "b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del c.blinds,", "some preliminary checks to see whether we should just skip", "we try to # not reference self.<variables> that may change.", "ephemeral port. - Before start of covert components phase, call", "the server at any time. By default, will bind to", "started. As # an anti-spam measure we only allow one", "pb.BlindSigResponses(scalars = scalars)) del c.blinds, c.blind_sig_requests del results, collector #", "reject) max_tier_client_tags = 100 # For a given IP, how", "proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client))", "component_contribs = [contrib for comp, (sort_key, contrib) in component_master_list] del", "transaction a small head start in relaying, before sharing the", "= {t: self.waiting_pools[t] for t in msg.tiers} except KeyError: if", ">= self.num_results: self.done_ev.set() return True class FusionController(threading.Thread, PrintError): \"\"\" This", "None) # further, search for duplicated inputs (through matching the", "# Generate scrambled commitment list, but remember exactly where each", "src_commitment_idx, dest_key_idx, src_client)) live_clients = len(results) collector = ResultsCollector(live_clients, done_on_fail", "at or above min_clients for this long. 
start_time_max = 1200", "error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer): \"\"\"", "5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1] # TODO -", "statuses, also check start times. statuses = dict() tfill_thresh =", "fusion can continue independently of waiting server. # self.spawned_clients.difference_update(chosen_clients) #", "= len([s for s in signatures if s is None])", "misconfiguration since fusion server ought # to have a good", "be trying to DoS us by # making us check", "whether we should just skip the # signing phase and", "the favoured pool, can start at a special time remtime", "permission notice shall be # included in all copies or", "# Guaranteed time to launch a fusion if the pool", "raise ValidationError('conflicting valid signature') with self.lock: try: self.signatures[msg.which_input] = sig", "def run(self): try: super().run() finally: self.waiting_pools.clear() # gc clean def", "class ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected client.\"\"\" def recv(self, *expected_msg_names,", "import time import traceback from collections import defaultdict import oregano.schnorr", "1: continue self.print_error(f\"multi-spend of f{prevout} detected\") # If exactly one", "rights to use, copy, modify, merge, # publish, distribute, sublicense,", "destinations, same way as client did. relays = [] for", "could we take until the result would exceed 100 kB", "1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8,", "modify, merge, # publish, distribute, sublicense, and/or sell copies of", "covert connections are allowed to stay open without activity. #", "# Collect submissions from different sources, with a deadline. def", "too few components submitted\") if total_excess_fees != sum(component_contribs): skip_signatures =", "self.print_error(\"broadcast was successful!\") # Give our transaction a small head", "self.print_error(\"timed out while trying to broadcast transaction! 
misconfigured?\") # This", "self._add_pool(client) moved.append(client) for client in moved: self.queue.remove(client) class FusionServer(GenericServer): \"\"\"Server", "chain, please switch servers\") else: client.print_error(\"👀 No genesis hash declared", "blind_nonce_points = [b.get_R() for b in c.blinds], server_time = round_time", "True)) else: self.print_error(\"starting covert signature acceptance\") tx, input_indices = tx_from_components(all_components,", "care.) with self.lock: for t, pool in mytierpools.items(): if pool.remove(client):", "represent a pool tag \"\"\" __slots__ = () def __new__(cls,", "a long timeout for clients to choose their pool. msg", "server_msg = e.server_msg self.print_error(f\"could not broadcast the transaction! {nice_msg}\") except", "+= 1 if ts.pool >= t.maxsimul: can_pool = False if", "privacy)? min_safe_clients = 6 # Choose the minimum excess fee", "shall be # included in all copies or substantial portions", "that a pool started. start_ev = threading.Event() client.start_ev = start_ev", "tag.limit)) try: mytierpools = {t: self.waiting_pools[t] for t in msg.tiers}", "self.lock: if self.stopping or start_ev.is_set(): return tnow = time.monotonic() #", "malicious behaviour! if sum((signatures[i] is not None) for i in", "= tag_max # how many clients can share same tag", "(sort_key, contrib) in component_master_list] del component_master_list # Do some preliminary", "'component': try: round_pubkey = self.round_pubkey feerate = self.feerate _ =", "c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars)) del c.blinds, c.blind_sig_requests del results, collector", "= tnow - Params.start_time_max for t, pool in mytierpools.items(): if", "the blamed client is already dead, don't waste more time.", "0: time.sleep(remtime) # Upload the full commitment list; we're a", "tag.limit < 6): client.error(\"Tag limit out of range\") ip =", "when the last fuse happened; as a placeholder, set this", "this long. 
start_time_max = 1200 # Inter-fusion delay -- after", "in mytierpools: # we left from best pool, so it", "enhanced bytes object to represent a pool tag \"\"\" __slots__", "Ergon client # CashFusion - an advanced coin anonymizer #", "= calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components) #TODO : Check the", "For a given IP, how many players can they represent", "time remtime = min(remtime, self.tier_best_starttime - tnow) if remtime <=", "len(msg.tags) > 5: client.error(\"Too many tags\") # Event for signalling", "import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol import Protocol", "size limitation? max_clients = (100000 - 12) // (num_components *", "all connections, call .stop(). \"\"\" def __init__(self, bindhost, port=0, upnp", "dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof,", "fusion could require 3-byte varint for either inputs or outputs", "self.feerate except AttributeError: pass try: del self.sighashes del self.pubkeys except", "has only been above min_clients for a short time (unless", "msg.blames)) != len(msg.blames): client.error('multiple blames point to same proof') #", "will be put into fusion round if started at this", "if total_excess_fees != sum(component_contribs): skip_signatures = True self.print_error(\"problem detected: excess", "self.components del self.feerate except AttributeError: pass try: del self.sighashes del", "client_get_proofs(client, collector): with collector: msg = client.recv('myproofslist') seed = msg.random_number", "Guaranteed time to launch a fusion if the pool has", "\"too many clients with same tag\" def _add_pool(self, client): self.pool.add(client)", "if ft is None: continue size = len(pool.pool) if size", "start_fuse(self, tier): \"\"\" Immediately launch Fusion at the selected tier.", "False try: 
self.queue.remove(client) except ValueError: return False else: in_pool =", "lot of logs noisy = False # How long covert", "return True self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components))) ### self.print_error(f\"entering", "the maximum interval between messages: # - how long from", "collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_start(c, collector): with", "trying to DoS us by # making us check many", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout) return submsg def send(self,", "= max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best = tier_best", "check_input_electrumx) # Resistor \"E series\" values -- round numbers that", "Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime > 0, \"timings set up", "continue self.print_error(f\"multi-spend of f{prevout} detected\") # If exactly one of", "None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif len(self.fails) + len(getattr(self, 'results',", "of players, - Choose the pool with the earliest fill", "self.clients: c.kill('blame yourself!') return # scan the commitment list and", "bad_components.update(input_indices[i] for i in bad_inputs) else: for i, (inp, sig)", "interval between messages: # - how long from first connection", "self.num_results = int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev = threading.Event() self.lock", "long before starting the next one (unless hit max time", "threading import time import traceback from collections import defaultdict import", "components phase, call start_components. - To signal the end of", "will bind to an ephemeral port. 
- Before start of", "until the result would exceed 100 kB standard tx size", "1200 # Inter-fusion delay -- after starting any fusion, wait", "not c.dead] self.check_client_count() if self.run_round(covert_server): break self.print_error('Ended successfully!') except FusionError", "= [contrib for comp, (sort_key, contrib) in component_master_list] del component_master_list", "= [] def __enter__(self, ): return self def __exit__(self, exc_type,", "all missing-signature components as bad. bad_inputs = set(i for i,sig", "run_round has exited. For this reason we try to #", "the # verifier, there is no privacy leak by the", "with self.lock: time_best = None tier_best = None size_best =", "Params.min_safe_clients: for c in live: c.kill(\"too few remaining live players\")", "def start_components(self, round_pubkey, feerate): self.components = dict() self.feerate = feerate", "c in baddies: c.kill(reason) def run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub", "broadcast transaction! misconfigured?\") # This probably indicates misconfiguration since fusion", "ack failed delivery, # but we don't allow it to", "import Address from oregano.util import PrintError, ServerError, TimeoutException from .", "= random.Random() rng.seed(secrets.token_bytes(32)) def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg,", "acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True)) else: self.print_error(\"starting covert", "(through matching the prevout and claimed pubkey). 
prevout_spenders = defaultdict(list)", "2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2,", "= time.monotonic() # when the last fuse happened; as a", "client.send(pb.TierStatusUpdate(statuses = statuses)) start_ev.wait(2) except: # Remove client from waiting", "break self.print_error('Ended successfully!') except FusionError as e: self.print_error(f\"Ended with error:", "except Exception as e: self.print_error(f\"player indicated bad input but checking", "mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT) if mtype", "= self.feerate _ = self.components except AttributeError: client.error('component submitted at", "the minimum excess fee based on dividing the overhead amongst", "timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client, text): # a", "# calculate the randomly chosen destinations, same way as client", "for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx,", "existing_sig: if not schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad transaction signature')", "start_time_min = 400 # whether to print a lot of", "Give our transaction a small head start in relaying, before", "collector) # Record the time that we sent 'startround' message", "to startup time. self.reset_timer() def run(self): try: super().run() finally: self.waiting_pools.clear()", "del results, collector # Sleep a bit before uploading commitments,", "in same phase') if mtype == 'component': try: round_pubkey =", "for c in chosen_clients: c.start_ev.set() # Remove those clients from", "gen_keypair, tx_from_components, rand_position) from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError,", "checks to see whether we should just skip the #", "many clients do we want before starting a fusion? 
min_clients", "allow unlimited access client.tags = [] else: # Default tag:", "at any time. By default, will bind to an ephemeral", "round(time.time()) self.sendall(pb.FusionBegin(tier = self.tier, covert_domain = annhost_b, covert_port = annport,", "ought # to have a good connection to the EC", "covert_T0 + Protocol.T_START_COMPS - time.monotonic() if remtime > 0: time.sleep(remtime)", "If all clients submitted largest possible component (uncompressed p2pkh input),", "Fusion rounds running from server side. \"\"\" def __init__(self, network,", "sent which proof proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [", "t in msg.tiers} except KeyError: if self.stopping: return client.error(f\"Invalid tier", "client declares the genesis_hash, we # do indeed disallow them", "class ResultsCollector: # Collect submissions from different sources, with a", "def run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub = gen_keypair() round_pubkey =", "live_clients = len(results) collector = ResultsCollector(live_clients, done_on_fail = False) def", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "about which client sent which proof proofs.sort(key = lambda x:x[1])", "Params.num_components: client.error(\"wrong number of proofs\") if any(len(p) > 200 for", "or maybe even restart / end # without sharing components.", "False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds until successful or", "exception while True: covert_server.reset() # Clean up dead clients self.clients", "# per phase. client.error('multiple submission in same phase') if mtype", "msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for client in self.clients: client.addjob(client_start, collector)", "a distinct valid signature. 
This is not # allowed and", "sig is None) # further, search for duplicated inputs (through", "+ str(ret.prev_index) try: check_input_electrumx(self.network, ret) except ValidationError as e: reason", "components submitted\") if total_excess_fees != sum(component_contribs): skip_signatures = True self.print_error(\"problem", "components. skip_signatures = False if len(all_components) != len(self.clients)*Params.num_components: skip_signatures =", "component' outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index) try: check_input_electrumx(self.network,", "i,sig in enumerate(signatures) if sig is None) # further, search", "mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg, mtype def", "signature submission. raise ValidationError('conflicting valid signature') with self.lock: try: self.signatures[msg.which_input]", "self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except AttributeError: client.error('signature submitted at wrong", "all copies or substantial portions of the Software. 
# #", "assert num_inputs == len(pubkeys) self.signatures = [None]*num_inputs self.sighashes = sighashes", "of proofs\") if any(len(p) > 200 for p in proofs):", "up client_commit_indexes = [[None]*Params.num_components for _ in self.clients] for i,", "[] for i, proof in enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed,", "str): self.print_error(f\"verified a bad proof (for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof", "= session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components) #TODO :", "mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if self.tier_best in mytierpools: # we", "size_best: time_best = ft tier_best = t size_best = size", "True for t in client.tags: ts = self.tags[t] ts.all_ +=", "None: client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected", "clientjob_goodbye(client, text): # a gentler goodbye than killing if text", "self.upnp, announcehost = self.announcehost) fusion.start() return len(chosen_clients) def new_client_job(self, client):", "or pool is full). 
start_time_spacing = 120 # But don't", "# - how long from one round's component submission to", "advanced coin anonymizer # # Copyright (C) 2020 <NAME> #", "pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients) remtime = inftime if", "+ len(self.results) >= self.num_results: self.done_ev.set() return True class FusionController(threading.Thread, PrintError):", "pool in self.waiting_pools.items(): for c in chosen_clients: pool.remove(c) pool.try_move_from_queue() #", "timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5) def", "return ret def start_signatures(self, sighashes, pubkeys): num_inputs = len(sighashes) assert", "possible component (uncompressed p2pkh input), how many could we take", "no-longer-used tags del self.tags[t] return True def try_move_from_queue(self): # attempt", "else: in_pool = True if len(self.pool) < self.fill_threshold: self.fill_time =", "mytierpools[t] pool.add(client) if len(pool.pool) >= Params.max_clients: # pool filled up", "exited. For this reason we try to # not reference", "OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES", "msg.tags: if len(tag.id) > 20: client.error(\"Tag id too long\") if", "exceed 100 kB standard tx size limitation? 
max_clients = (100000", "number of players, - Choose the pool with the earliest", "server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time = round(time.time())", "Params.ip_max_simul_fuse)] for tag in msg.tags: if len(tag.id) > 20: client.error(\"Tag", "included in all copies or substantial portions of the Software.", "reason) continue except Exception as e: self.print_error(f\"player indicated bad input", "\"\"\" This controls the Fusion rounds running from server side.", "t in client.tags: ts = self.tags[t] ts.all_ += 1 if", "in range(Params.num_components)] lock = threading.Lock() seen_salthashes = set() # Send", "many clients can share same tag (in pool and queue)", ">= size_best: if time_best is None or ft < time_best", "start_ev.is_set(): return tnow = time.monotonic() # scan through tiers and", "and we are mainnet, etc. client.error(\"This server is on a", "else: self.print_error(f\"player indicated bad input but it was fine ({outpoint})\")", "notice shall be # included in all copies or substantial", "to consider the maximum interval between messages: # - how", "smallest fusion could require 3-byte varint for both inputs and", "raise FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer): \"\"\" Server for covert", "except KeyError: in_pool = False try: self.queue.remove(client) except ValueError: return", "start_ev = threading.Event() client.start_ev = start_ev if client_ip.startswith('127.'): # localhost", "how many players can they represent in the same fuse?", "### self.print_error(f\"ending covert signature acceptance. 
{missing_sigs} missing :{'(' if missing_sigs", "self.lock: ret = self.components del self.components return ret def start_signatures(self,", "= signatures)) return True self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components)))", "@ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier =", "this # will form the basis of our covert timeline.", "seen_salthashes.update(newhashes) if len(seen_salthashes) != expected_len: c.error('duplicate component commitment') if not", "but that's OK. self.sendall(pb.AllCommitments(initial_commitments = all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until", "until end of covert components phase remtime = covert_T0 +", "len(proofs) != Params.num_components: client.error(\"wrong number of proofs\") if any(len(p) >", "can continue independently of waiting server. # self.spawned_clients.difference_update(chosen_clients) # Kick", "first connection to last possible Tor component submission? # -", "start_ev if client_ip.startswith('127.'): # localhost is whitelisted to allow unlimited", "# Sort the components & contribs list, then separate it", "delivery, # but we don't allow it to consume our", "num_components >= 0xfc: # the smallest fusion could require 3-byte", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN #", "covert_port = annport, covert_ssl = False, server_time = begin_time)) self.last_hash", "= None): baddies = set(self.clients).difference(goodclients) for c in baddies: c.kill(reason)", "list of signatures (which will have None at positions of", "generous with the timeout but that's OK. 
self.sendall(pb.AllCommitments(initial_commitments = all_commitments),", "begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds until successful or exception", "# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT.", "in [10000, 100000, 1000000, 10000000, 100000000] for s in E12]", "ipstr, tagbytes, maxsimul): ipb = ipstr.encode() b = bytes([maxsimul, len(ipb)])", "pool.remove(c) pool.try_move_from_queue() # Update timing info self.t_last_fuse = time.monotonic() self.reset_timer()", "both inputs and outputs lists overhead = 58 min_excess_fee =", "for b in [10000, 100000, 1000000, 10000000, 100000000] for s", "have changed the favoured tier self.reset_timer() inftime = float('inf') while", "furnished to do so, # subject to the following conditions:", "inftime if pool.fill_time is not None: # a non-favoured pool", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED,", "time_best = None tier_best = None size_best = 0 for", "could blame the originator, however # blockchain checks are somewhat", "gentler goodbye than killing if text is not None: client.send_error(text)", "to an ephemeral port. - Before start of covert components", "def __init__(self, fill_threshold, tag_max): self.pool = set() # clients who", "raise FusionError(\"way too slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs =", "- To reset the server for a new round, call", "# Await commitment messages then process results results = collector.gather(deadline", "# subject to the following conditions: # # The above", "commitment. 
prev_client_count = len(self.clients) self.clients = [c for c, _,", "in_pool = True if len(self.pool) < self.fill_threshold: self.fill_time = None", "object to represent a pool tag \"\"\" __slots__ = ()", "ClientTag(bytes): \"\"\" enhanced bytes object to represent a pool tag", "(self, ): self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}') covert_server", "long timeout for clients to choose their pool. msg =", "# since it might be a resubmission after ack failed", "size of 1 + 5 (lokad) + 33 (session hash)", "op_return script size of 1 + 5 (lokad) + 33", "print a lot of logs noisy = False # How", "= upnp) self.round_pubkey = None def start_components(self, round_pubkey, feerate): self.components", "client): for t in client.tags: ts = self.tags.get(t) if ts", "self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components) #TODO", "calc_round_hash, gen_keypair, tx_from_components, rand_position) from .validation import (check_playercommit, check_covert_component, validate_blame,", "as a result. # Note that we could have aborted", "for c in self.clients: c.addjob(clientjob_goodbye, None) self.clients = [] #", "signed, then it's malicious behaviour! if sum((signatures[i] is not None)", "# Now, repackage the proofs according to destination. proofs_to_relay =", "for i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i) for prevout, spenders", "too many fuses. 
client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag", "t, pool in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if self.tier_best in", "# At this point we could blame the originator, however", "between messages: # - how long from first connection to", "submsg, mtype def send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout)", ">= 0xfc: # the smallest fusion could require 3-byte varint", "collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_get_proofs(client, collector): with", "def end_components(self): with self.lock: ret = self.components del self.components return", "earlier but this # way third parties can't abuse us", "this function might run for a while if many #", "is full). start_time_min = 400 # whether to print a", "TimeoutException from . import fusion_pb2 as pb from .comms import", "time or pool is full). start_time_spacing = 120 # But", "return tnow = time.monotonic() # scan through tiers and collect", "error') finally: covert_server.stop() for c in self.clients: c.addjob(clientjob_goodbye, None) self.clients", "Software without restriction, # including without limitation the rights to", "2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6,", "+ Protocol.BLAME_VERIFY_TIME) # More than one blame per proof is", "take until the result would exceed 100 kB standard tx", "time') sort_key, contrib = check_covert_component(msg, round_pubkey, feerate) with self.lock: try:", "# gc clean def reset_timer(self, ): \"\"\" Scan pools for", "dest_key_idx)) if not collector.add((client, relays)): client.error(\"late proofs\") for client in", "try: annhost = covert_server.host if self.announcehost is None else self.announcehost", "against blockchain. if len(msg.blames) > len(proofs): client.error('too many blames') if", "How long covert connections are allowed to stay open without", "through tiers and collect statuses, also check start times. 
statuses", "threading.Lock() self.results = [] self.fails = [] def __enter__(self, ):", "self.announcehost is None else self.announcehost annhost_b = annhost.encode('ascii') annport =", "0: # really shouldn't happen, we had plenty of time", "outputs lists overhead = 58 min_excess_fee = (overhead + min_safe_clients", "from best pool, so it might not be best anymore.", "= pool.fill_time - tfill_thresh if t == self.tier_best: # this", "back to the # verifier, there is no privacy leak", "player. # we aren't collecting any results, rather just marking", "covert component acceptance. {len(component_master_list)} received.\") # Sort the components &", "return False else: in_pool = True if len(self.pool) < self.fill_threshold:", "start_signatures. - To signal the end of covert signatures phase,", "client): client.got_submit = False while True: msg, mtype = client.recv('component',", "not schnorr.has_fast_sign() or not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost,", "all clients submitted largest possible component (uncompressed p2pkh input), how", "inputs and outputs lists overhead = 62 elif min_safe_clients *", "self.announcehost = announcehost self.daemon = True def sendall(self, msg, timeout", "check_client_count(self,): live = [c for c in self.clients if not", "covert_server.port covert_server.noisy = Params.noisy covert_server.start() self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port}", "commitment list and note where each client's commitments ended up", "for the favoured fuse: - Out of the pool(s) with", "is None]) ### self.print_error(f\"ending covert signature acceptance. {missing_sigs} missing :{'('", "# without sharing components. 
skip_signatures = False if len(all_components) !=", "1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7,", "except AttributeError: client.error('component submitted at wrong time') else: assert mtype", "= self.signatures[msg.which_input] except AttributeError: client.error('signature submitted at wrong time') except", "passed over to a FusionController to run the rounds.\"\"\" def", "tuple(commit for commit,ci,cj in commitment_master_list) # Send blind signatures for", "players\") raise FusionError(\"too few remaining live players\") def run (self,", "1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2]", "a malleated version by re-signing one of their inputs. time.sleep(2)", "+ Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best = tier_best def start_fuse(self,", "the server for a new round, call .reset(); to kill", "= None): super().__init__(name=\"FusionController\") self.network = network self.tier = tier self.clients", "same proof') # Note, the rest of this function might", "client.send(msg, timeout=timeout) def clientjob_goodbye(client, text): # a gentler goodbye than", "def start_signatures(self, sighashes, pubkeys): num_inputs = len(sighashes) assert num_inputs ==", "out the originating client's commitments). 
myindex = self.clients.index(client) possible_commitment_destinations =", "def __init__(self, network, tier, clients, bindhost, upnp = None, announcehost", "self.clients: c.addjob(clientjob_goodbye, None) self.clients = [] # gc def kick_missing_clients(self,", "FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer): \"\"\" Server for covert submissions.", "get_current_genesis_hash(): # For now, msg.genesis_hash is optional and we tolerate", "upnp = None, announcehost = None): super().__init__(name=\"FusionController\") self.network = network", "def client_start(c, collector): with collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points =", "the time that we sent 'startround' message to players; this", "in commitment_master_list if ci != myindex] N = len(possible_commitment_destinations) assert", "hash) ) if min_safe_clients * num_components >= 2 * 0xfc:", "prevout_spenders = defaultdict(list) for i, inp in enumerate(tx.inputs()): prevout_spenders[f\"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}\"].append(i)", "to move clients from queue into pool moved = []", "= [c for c, _, _ in results] self.check_client_count() self.print_error(f\"got", "= time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class", "(+- amount - fee). - Before start of covert signatures", "_ in proofs])) msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT +", "for clients waiting to start a fusion. 
New clients get", "clients submitted largest possible component (uncompressed p2pkh input), how many", "+= 1 if len(self.pool) == self.fill_threshold: self.fill_time = time.monotonic() def", "input), how many could we take until the result would", "we even have reasonable # privacy with what we have.", "from spawned clients list, so that the fusion can continue", "N = len(possible_commitment_destinations) assert N == len(all_commitments) - Params.num_components #", "is not None) for i in spenders) != 1: bad_inputs.update(spenders)", "'signature' try: sighash = self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig =", "def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client,", "len(spenders) == 1: continue self.print_error(f\"multi-spend of f{prevout} detected\") # If", "got a second submission before a new phase started. As", "please upgrade\") if msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash(): # For", "\"\"\"Server for clients waiting to start a fusion. New clients", "exactly where each commitment originated. commitment_master_list = [(commit, ci, cj)", "thread is started, the ClientThreads are passed over to a", "'' if isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components =", "self.results.append(result) except AttributeError: return False else: if len(self.fails) + len(self.results)", "c.blind_sig_requests = msg.blind_sig_requests c.random_number_commitment = msg.random_number_commitment for client in self.clients:", "Report this back to clients # as an 'internal server", "== 0: # cleanup for no-longer-used tags del self.tags[t] return", "then there is no favoured fuse. 
(since fill time is", "client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}') continue src_commit_blob, src_commit_client_idx, _", "try: sighash = self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input]", "not natively offer SSL support, however a server admin may", "covert_Cpub = gen_keypair() round_pubkey = covert_Cpub # start to accept", "are almost geometrically uniform E6 = [1.0, 1.5, 2.2, 3.3,", "clients (dropped {prev_client_count - len(self.clients)})\") total_excess_fees = sum(f for _,_,f", "= scalars)) del c.blinds, c.blind_sig_requests del results, collector # Sleep", "ResultsCollector: # Collect submissions from different sources, with a deadline.", "c in self.clients: scalars = [b.sign(covert_priv, e) for b,e in", "# Send blind signatures for c in self.clients: scalars =", "(for {src_commitment_idx}): {ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}') continue if", "consider the maximum interval between messages: # - how long", "rather just marking that # 'checking finished' so that if", "results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT) # Now, repackage", "ts.all_ -= 1 if in_pool: ts.pool -= 1 if ts.all_", "Notify that we will start. for c in chosen_clients: c.start_ev.set()", "finished' so that if all blames are checked, we #", "end # without sharing components. 
skip_signatures = False if len(all_components)", "fill_threshold, tag_max): self.pool = set() # clients who will be", "Software is furnished to do so, # subject to the", "# the smallest fusion could require 3-byte varint for either", "len(ipb)]) + ipb + tagbytes return super().__new__(cls, b) @property def", "= size if time_best is None: self.tier_best_starttime = None else:", "c, _, _ in results] self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)}", "than one pool, we don't have bias towards any particular", "Record the time that we sent 'startround' message to players;", "recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg, mtype def send(self, submsg,", "CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None): submsg, mtype = recv_pb(self.connection, pb.CovertMessage,", "a dict of {component: contrib}, where contrib is (+- amount", "submitted at wrong time') sort_key, contrib = check_covert_component(msg, round_pubkey, feerate)", "it was fine ({outpoint})\") # At this point we could", "might be we already have this signature. This is fine", "as nginx for that purpose. \"\"\" import secrets import sys", "network, bindhost, port, upnp = None, announcehost = None, donation_address", "without limitation the rights to use, copy, modify, merge, #", "ip_max_simul_fuse = 3 # Guaranteed time to launch a fusion", "= inftime if pool.fill_time is not None: # a non-favoured", "time.sleep(remtime) component_master_list = list(covert_server.end_components().items()) self.print_error(f\"ending covert component acceptance. 
{len(component_master_list)} received.\")", "we don't punish him # because he's the honest guy", "dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients = len(results)", "pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients)", "{ret}\") src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}') continue if src_client.dead: #", "fusion. rng.shuffle(chosen_clients) fusion = FusionController(self. network, tier, chosen_clients, self.bindhost, upnp", "which may have changed the favoured tier self.reset_timer() inftime =", "largest possible component (uncompressed p2pkh input), how many could we", "phase started. As # an anti-spam measure we only allow", "time.monotonic() # scan through tiers and collect statuses, also check", "it's malicious behaviour! if sum((signatures[i] is not None) for i", "(sort_key, contrib) in component_master_list] component_contribs = [contrib for comp, (sort_key,", "self.queue = list() # clients who are waiting due to", "where contrib is (+- amount - fee). - Before start", "bytes([maxsimul, len(ipb)]) + ipb + tagbytes return super().__new__(cls, b) @property", "= ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index) try: check_input_electrumx(self.network, ret) except", "mark all missing-signature components as bad. 
bad_inputs = set(i for", "to see if we even have reasonable # privacy with", "range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components", "\"\"\" a waiting pool for a specific tier \"\"\" def", "try: mytierpools = {t: self.waiting_pools[t] for t in msg.tiers} except", "self.reset_timer() raise class ResultsCollector: # Collect submissions from different sources,", "check for testing -- the proof sharing thing doesn't even", "in_pool: ts.pool -= 1 if ts.all_ == 0: # cleanup", "tiers and collect statuses, also check start times. statuses =", "to DoS us by # making us check many inputs", "we only allow one submission per connection # per phase.", "client.error(res) for t in mytiers: pool = mytierpools[t] pool.add(client) if", "self.announcehost = announcehost self.donation_address = donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients,", "mainnet, etc. client.error(\"This server is on a different chain, please", "< tag.limit < 6): client.error(\"Tag limit out of range\") ip", "import oregano.schnorr as schnorr from oregano.address import Address from oregano.util", "traceback): if exc_type is not None: self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set()", "reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\") src_client.kill('you", "start_ev.wait(2) except: # Remove client from waiting pools on failure", "client.error(f\"Invalid tier selected: {t}\") try: mytiers = list(mytierpools) rng.shuffle(mytiers) #", "we need as an absolute minimum (for privacy)? min_safe_clients =", "e: self.print_error(f\"player indicated bad input but checking failed with exception", "# CashFusion - an advanced coin anonymizer # # Copyright", "and outputs lists overhead = 58 min_excess_fee = (overhead +", "limitation? 
max_clients = (100000 - 12) // (num_components * 173)", "self.feerate _ = self.components except AttributeError: client.error('component submitted at wrong", "assert remtime > 0, \"timings set up incorrectly\" time.sleep(remtime) component_master_list", "the Fusion rounds running from server side. \"\"\" def __init__(self,", "self.spawned_clients: c.got_submit = False def end_components(self): with self.lock: ret =", "pb from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from", "pools for pool in mytierpools.values(): res = pool.check_add(client) if res", "list (all commitments, but leaving out the originating client's commitments).", "marking that # 'checking finished' so that if all blames", "def kick_missing_clients(self, goodclients, reason = None): baddies = set(self.clients).difference(goodclients) for", "tags del self.tags[t] return True def try_move_from_queue(self): # attempt to", "no favoured fuse. (since fill time is a float, this", "not None) for i in spenders) != 1: bad_inputs.update(spenders) if", "!= len(msg.blames): client.error('multiple blames point to same proof') # Note,", "text is not None: client.send_error(text) raise client.Disconnect class ClientThread(ClientHandlerThread): \"\"\"Basic", "this will almost always be unique) \"\"\" with self.lock: time_best", "Event for signalling us that a pool started. start_ev =", "time (unless pool is full). start_time_min = 400 # whether", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "[] # gc def kick_missing_clients(self, goodclients, reason = None): baddies", "done_on_fail = False) def client_start(c, collector): with collector: c.send(pb.StartRound(round_pubkey =", "pool.fill_time if ft is None: continue size = len(pool.pool) if", "(the \"Software\"), to deal in the Software without restriction, #", "= len(results) collector = ResultsCollector(live_clients, done_on_fail = False) def client_get_blames(client,", "same way as client did. 
relays = [] for i,", "self.send(pb.OK(), timeout=5) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=5) def", "in self.clients] for i, (commit, ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj]", "size >= size_best: if time_best is None or ft <", "running from server side. \"\"\" def __init__(self, network, tier, clients,", "network, tier, clients, bindhost, upnp = None, announcehost = None):", "client.error(\"Mismatched protocol version, please upgrade\") if msg.genesis_hash: if msg.genesis_hash !=", "on dividing the overhead amongst players, in the smallest fusion", "after run_round has exited. For this reason we try to", "tags being full self.tags = defaultdict(TagStatus) # how are the", "self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing) self.tier_best =", "by the ommission. continue assert ret, 'expecting input component' outpoint", "send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def send_ok(self,): self.send(pb.OK(), timeout=5) def send_error(self,", "for prevout, spenders in prevout_spenders.items(): if len(spenders) == 1: continue", "of the Software, # and to permit persons to whom", "added to pools, which may have changed the favoured tier", "client did. 
relays = [] for i, proof in enumerate(proofs):", "(check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx) # Resistor \"E series\" values", "ret = self.results del self.results return ret def add(self, result):", "after starting any fusion, wait this long before starting the", "None: client.error(res) for t in mytiers: pool = mytierpools[t] pool.add(client)", "in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if self.tier_best in mytierpools: #", "client in self.clients: client.addjob(clientjob_send, msg, timeout) def check_client_count(self,): live =", "with self.lock: if self.stopping or start_ev.is_set(): return tnow = time.monotonic()", "are passed over to a FusionController to run the rounds.\"\"\"", "ret = self.components del self.components return ret def start_signatures(self, sighashes,", "self.print_error(\"starting covert signature acceptance\") tx, input_indices = tx_from_components(all_components, session_hash) sighashes", "for client in self.clients: client.addjob(client_get_proofs, collector) results = collector.gather(deadline =", "from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol", "same tag\" def _add_pool(self, client): self.pool.add(client) for t in client.tags:", "ValidationError('conflicting valid signature') with self.lock: try: self.signatures[msg.which_input] = sig except", "is None else self.announcehost annhost_b = annhost.encode('ascii') annport = covert_server.port", "= [(ci,cj) for commit, ci, cj in commitment_master_list if ci", "Oregano - a lightweight Ergon client # CashFusion - an", "size if time_best is None: self.tier_best_starttime = None else: self.tier_best_starttime", "is malicious. Boot client # immediately since client may be", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN", "This makes it slightly harder for one of the players", "= set(i for i,sig in enumerate(signatures) if sig is None)", "goodclients, reason = None): baddies = set(self.clients).difference(goodclients) for c in", "collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS) # Filter clients who didn't", "ipstr.encode() b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes return", "special time remtime = min(remtime, self.tier_best_starttime - tnow) if remtime", "def add(self, result): with self.lock: try: self.results.append(result) except AttributeError: return", "size, so start immediately self.start_fuse(t) return # we have added", "this permission notice shall be # included in all copies", "False # How long covert connections are allowed to stay", "in spenders) != 1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for i", "over to a FusionController to run the rounds.\"\"\" def __init__(self,", "= [b.get_R() for b in c.blinds], server_time = round_time ))", "self.fill_time = None # when did pool exceed fill_threshold self.tag_max", "self[0] class TagStatus: __slots__ = ('pool', 'all_') def __init__(self): self.pool", "many clients with same tag\" def _add_pool(self, client): self.pool.add(client) for", "__init__(self): self.pool = 0 self.all_ = 0 class WaitingPool: \"\"\"", "Protocol.STANDARD_TIMEOUT): for client in self.clients: client.addjob(clientjob_send, msg, timeout) def check_client_count(self,):", "baddies: c.kill(reason) def run_round(self, covert_server): covert_priv, covert_Upub, covert_Cpub = gen_keypair()", "clients can share same tag (in pool and queue) def", "all_commitments = tuple(commit for commit,ci,cj in commitment_master_list) # Send blind", "= sig.hex() + '41' assert tx.is_complete() txid = tx.txid() self.print_error(\"completed", "= self.tags[t] ts.all_ -= 1 if in_pool: ts.pool -= 1", "short time (unless pool is full). 
start_time_min = 400 #", "{len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})\") total_excess_fees = sum(f for", "class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names, timeout=None): submsg, mtype = recv_pb(self.connection,", "sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex() + '41' assert", "time raise FusionError(\"way too slow\") time.sleep(remtime) signatures = list(covert_server.end_signatures()) missing_sigs", "= [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()] covert_server.start_signatures(sighashes,pubkeys) self.sendall(pb.ShareCovertComponents(components = all_components,", "# we left from best pool, so it might not", "for blame in msg.blames: try: encproof, src_commitment_idx, dest_key_idx, src_client =", "1 if ts.all_ == 0: # cleanup for no-longer-used tags", "_, _ in results] self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)} clients", "waiting to start a fusion. New clients get a ClientThread", "# Do some preliminary checks to see whether we should", "remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic() if remtime <", ".comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol import", "indicated bad input but checking failed with exception {repr(e)} ({outpoint})\")", "class Params: num_components = 23 component_feerate = 1000 # sats/kB", "one blame per proof is malicious. Boot client # immediately", "del self.signatures return ret def reset(self): try: del self.round_pubkey del", "inputs. 
time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures = signatures)) return True", "= self.tags[t] ts.all_ += 1 if ts.pool >= t.maxsimul: can_pool", "missing_sigs else ')'}\") # mark all missing-signature components as bad.", "2 * 0xfc: # the smallest fusion could require 3-byte", "from oregano.address import Address from oregano.util import PrintError, ServerError, TimeoutException", "go directly to blame, or maybe even restart / end", "client_commit_indexes[ci][cj] = i collector = ResultsCollector(len(self.clients), done_on_fail = False) def", "msg, timeout = Protocol.STANDARD_TIMEOUT): for client in self.clients: client.addjob(clientjob_send, msg,", "just skip the # signing phase and go directly to", "the pool has stayed at or above min_clients for this", "= False) def client_get_proofs(client, collector): with collector: msg = client.recv('myproofslist')", "dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)] src_commitment_idx = client_commit_indexes[myindex][i] relays.append((proof, src_commitment_idx,", "client is already dead, don't waste more time. # Since", "bad input: ' + reason) continue except Exception as e:", "fill time; - If no pools are filled then there", "indeed disallow them connecting if they are e.g. 
on testnet", "maximum size, so start immediately self.start_fuse(t) return # we have", "results, rather just marking that # 'checking finished' so that", "could require 3-byte varint for both inputs and outputs lists", "if not schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad transaction signature') if", "checks against blockchain need to be done, perhaps even still", "= None size_best = 0 for t, pool in self.waiting_pools.items():", "if the client declares the genesis_hash, we # do indeed", "in self.clients: c.kill('blame yourself!') return # scan the commitment list", "ValidationError, check_input_electrumx) # Resistor \"E series\" values -- round numbers", "msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self, msg): self.send_error(msg) raise", "!= 1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for i in bad_inputs)", "timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def send_error(self, msg): self.send(pb.Error(message =", "missing :{'(' if missing_sigs else ')'}\") # mark all missing-signature", "tier selected: {t}\") try: mytiers = list(mytierpools) rng.shuffle(mytiers) # shuffle", "and all the other components were # just imposters who", "end_components(self): with self.lock: ret = self.components del self.components return ret", "= () def __new__(cls, ipstr, tagbytes, maxsimul): ipb = ipstr.encode()", "self.bindhost, upnp = self.upnp, announcehost = self.announcehost) fusion.start() return len(chosen_clients)", "statuses = dict() tfill_thresh = tnow - Params.start_time_max for t,", "t in Params.tiers} self.t_last_fuse = time.monotonic() # when the last", "bad_inputs: bad_components.update(input_indices[i] for i in bad_inputs) else: for i, (inp,", "Launch the server at any time. By default, will bind", "a short time (unless pool is full). 
start_time_min = 400", "chosen_clients = list(self.waiting_pools[tier].pool) # Notify that we will start. for", "= [] self.fails = [] def __enter__(self, ): return self", "few remaining live players\") raise FusionError(\"too few remaining live players\")", "of {component: contrib}, where contrib is (+- amount - fee).", "(which will have None at positions of missing signatures). -", "sig != existing_sig: if not schnorr.verify(pubkey, sig, sighash): raise ValidationError('bad", "= 58 min_excess_fee = (overhead + min_safe_clients - 1) //", "waiting pools on failure (on success, we are already removed;", "self.fails = [] def __enter__(self, ): return self def __exit__(self,", "[b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)] c.addjob(clientjob_send, pb.BlindSigResponses(scalars =", "= 6 # Choose the minimum excess fee based on", "for c in self.clients: c.blinds = [schnorr.BlindSigner() for _co in", "outputs to see if we even have reasonable # privacy", "WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE,", "Send start message to players; record the time we did", "[c for c in self.clients if not c.dead] self.check_client_count() if", "if remtime < 0: # really shouldn't happen, we had", "client.random_number_commitment: client.error(\"seed did not match commitment\") proofs = msg.encrypted_proofs if", "possible_commitment_destinations = [(ci,cj) for commit, ci, cj in commitment_master_list if", "dest_key_idx, src_client = proofs[blame.which_proof] except IndexError: client.kill(f'bad proof index {blame.which_proof}", "as e: self.print_error(f\"player indicated bad input but checking failed with", "= annport, covert_ssl = False, server_time = begin_time)) self.last_hash =", "src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]] try: ret", "# included in all copies or substantial portions of the", "signatures. 
This makes it slightly harder for one of the", "& contribs list, then separate it out. component_master_list.sort(key=lambda x:x[1][0]) all_components", "= ft tier_best = t size_best = size if time_best", "hereby granted, free of charge, to any person # obtaining", "bad blame; clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e}", "a placeholder, set this to startup time. self.reset_timer() def run(self):", "sighash): raise ValidationError('bad transaction signature') if existing_sig: # We received", "chosen destinations, same way as client did. relays = []", "to blame, or maybe even restart / end # without", "to use, copy, modify, merge, # publish, distribute, sublicense, and/or", "A basic server implementation for CashFusion. Does not natively offer", "if ts.pool >= t.maxsimul: break else: self._add_pool(client) moved.append(client) for client", "= 1200 # Inter-fusion delay -- after starting any fusion,", "calculate the randomly chosen destinations, same way as client did.", "= threading.Event() client.start_ev = start_ev if client_ip.startswith('127.'): # localhost is", "# scan through tiers and collect statuses, also check start", ">= t.maxsimul: can_pool = False if can_pool: self._add_pool(client) else: self.queue.append(client)", "Filter clients who didn't manage to give a good commitment.", "the end of covert components phase, owner calls end_components, which", "with self.lock: try: self.components[msg.component] = (sort_key, contrib) except AttributeError: client.error('component", "leak by the ommission. continue assert ret, 'expecting input component'", "clients do we want before starting a fusion? min_clients =", "= network self.announcehost = announcehost self.donation_address = donation_address self.waiting_pools =", "del self.sighashes del self.pubkeys except AttributeError: pass def new_client_job(self, client):", "remember exactly where each commitment originated. 
commitment_master_list = [(commit, ci,", "given tier (if more try to join, reject) max_tier_client_tags =", "detected: excess fee mismatch\") self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey,", "client in self.queue: for t in client.tags: ts = self.tags[t]", "switch servers\") else: client.print_error(\"👀 No genesis hash declared by client,", "+ min_safe_clients - 1) // min_safe_clients # How many clients", "round's component submission to the next round's component submission? COVERT_CLIENT_TIMEOUT", "list() # clients who are waiting due to tags being", "clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT): client.send(msg, timeout=timeout) def clientjob_goodbye(client, text):", "pool.add(client) if len(pool.pool) >= Params.max_clients: # pool filled up to", "further, search for duplicated inputs (through matching the prevout and", "with self.lock: ret = self.results del self.results return ret def", "checks are somewhat subjective. It would be # appropriate to", ".reset(); to kill all connections, call .stop(). \"\"\" def __init__(self,", "sum(f for _,_,f in results) # Generate scrambled commitment list,", "scan through tiers and collect statuses, also check start times.", "signature. This is not # allowed and we break the", "players to # broadcast a malleated version by re-signing one", "(0 < tag.limit < 6): client.error(\"Tag limit out of range\")", "it might be a resubmission after ack failed delivery, #", "Params.min_clients) remtime = inftime if pool.fill_time is not None: #", "= len(possible_commitment_destinations) assert N == len(all_commitments) - Params.num_components # calculate", "reasonable # privacy with what we have. bad_components = set()", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "use_cache = True)))) for i in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0])", "checked, we # can start next round right away. 
collector.add(None)", "not schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp", "CovertServer(self.bindhost, upnp = self.upnp) try: annhost = covert_server.host if self.announcehost", "for them, and they are put into the waiting pools.", "submission. raise ValidationError('conflicting valid signature') with self.lock: try: self.signatures[msg.which_input] =", "client_commit_indexes = [[None]*Params.num_components for _ in self.clients] for i, (commit,", "_ in self.clients] for src_client, relays in results: for proof,", "the waiting pools. Once a Fusion thread is started, the", "connection as a result. # Note that we could have", "def __exit__(self, exc_type, exc_value, traceback): if exc_type is not None:", "one of their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures =", "KeyError: in_pool = False try: self.queue.remove(client) except ValueError: return False", "so that the fusion can continue independently of waiting server.", "Every round, clients leave ... How many clients do we", "return False else: if len(self.fails) + len(self.results) >= self.num_results: self.done_ev.set()", "Default tag: this IP cannot be present in too many", "collector = ResultsCollector(live_clients, done_on_fail = False) def client_get_blames(client, myindex, proofs,", "a FusionController to run the rounds.\"\"\" def __init__(self, config, network,", "import defaultdict import oregano.schnorr as schnorr from oregano.address import Address", "self.t_last_fuse + Params.start_time_spacing) self.tier_best = tier_best def start_fuse(self, tier): \"\"\"", "that if filling more than one pool, we don't have", "tolerate it # missing. 
However, if the client declares the", "started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})') begin_time = round(time.time()) self.sendall(pb.FusionBegin(tier", "c in live: c.kill(\"too few remaining live players\") raise FusionError(\"too", "copyright notice and this permission notice shall be # included", "one of the inputs is signed, we don't punish him", "Copyright (C) 2020 <NAME> # # Permission is hereby granted,", "self.tag_max = tag_max # how many clients can share same", "clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message: {e} (you claimed:", "def __new__(cls, ipstr, tagbytes, maxsimul): ipb = ipstr.encode() b =", "- 12) // (num_components * 173) # Every round, clients", "return super().__new__(cls, b) @property def maxsimul(self): return self[0] class TagStatus:", "for p in proofs): client.error(\"too-long proof\") # they should only", "signature acceptance\") self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True)) else: self.print_error(\"starting", "timeline. 
covert_T0 = time.monotonic() self.print_error(f\"startround sent at {time.time()}; accepting covert", "if msg.genesis_hash != get_current_genesis_hash(): # For now, msg.genesis_hash is optional", "= all_commitments), timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES) # Sleep until end of covert components", "fuse happened; as a placeholder, set this to startup time.", "self.fill_threshold = fill_threshold # minimum number of pool clients to", "with self.lock: try: self.signatures[msg.which_input] = sig except AttributeError: client.error('signature submitted", "if pool.fill_time is not None: # a non-favoured pool will", "# verifier, there is no privacy leak by the ommission.", "donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components, component_feerate = Params.component_feerate,", "{len(proofs)}') continue src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx] dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]]", "and claimed pubkey). prevout_spenders = defaultdict(list) for i, inp in", "donation_address self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers}", "this long before starting the next one (unless hit max", "overhead numbers assume op_return script size of 1 + 5", "annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds until", "did this round_time = round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail =", "to see whether we should just skip the # signing", "- Out of the pool(s) with the most number of", "of range\") ip = '' if tag.no_ip else client_ip client.tags.append(ClientTag(ip,", "return # we have added to pools, which may have", "to clients # as an 'internal server error'. 
raise else:", "return submsg, mtype def send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg,", "how many could we take until the result would exceed", "get_current_genesis_hash from .protocol import Protocol from .util import (FusionError, sha256,", "size > size_best: time_best = ft tier_best = t size_best", "any results, rather just marking that # 'checking finished' so", "* 0xfc: # the smallest fusion could require 3-byte varint", "clients from all pools for t, pool in self.waiting_pools.items(): for", "in prevout_spenders.items(): if len(spenders) == 1: continue self.print_error(f\"multi-spend of f{prevout}", "of 1 + 5 (lokad) + 33 (session hash) )", "self.lock: chosen_clients = list(self.waiting_pools[tier].pool) # Notify that we will start.", "len(seen_salthashes) != expected_len: c.error('duplicate component commitment') if not collector.add((c, msg.initial_commitments,", "= dict() self.feerate = feerate self.round_pubkey = round_pubkey for c", "= [c for c in self.clients if not c.dead] if", "natively offer SSL support, however a server admin may run", "continue except Exception as e: self.print_error(f\"player indicated bad input but", "if len(all_components) != len(self.clients)*Params.num_components: skip_signatures = True self.print_error(\"problem detected: too", "self.print_error(f\"got commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})\") total_excess_fees", "c.addjob(clientjob_goodbye, 'internal server error') finally: covert_server.stop() for c in self.clients:", "to find out the # timing of a given input's", "self.send(pb.Error(message = msg), timeout=5) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected", "of the players to # broadcast a malleated version by", "ci, cj in commitment_master_list if ci != myindex] N =", "was successful!\") # Give our transaction a small head start", "If exactly one of the inputs is signed, we don't", "correlations about 
which client sent which proof proofs.sort(key = lambda", "a ClientThread made for them, and they are put into", "7.5, 8.2, 9.1] # TODO - make these configurable class", "network assert isinstance(donation_address, (Address, type(None))) if not schnorr.has_fast_sign() or not", "for testing -- the proof sharing thing doesn't even make", "!= 64: raise ValidationError('signature length is wrong') # It might", "def send_error(self, msg): self.send(pb.Error(message = msg), timeout=5) def error(self, msg):", "time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread):", "- time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock: ret = self.results del", "self.components return ret def start_signatures(self, sighashes, pubkeys): num_inputs = len(sighashes)", "PrintError): \"\"\" This controls the Fusion rounds running from server", "numbers that are almost geometrically uniform E6 = [1.0, 1.5,", "off the fusion. rng.shuffle(chosen_clients) fusion = FusionController(self. network, tier, chosen_clients,", "how are the various tags self.fill_threshold = fill_threshold # minimum", "time import traceback from collections import defaultdict import oregano.schnorr as", "signatures)): inp['signatures'][0] = sig.hex() + '41' assert tx.is_complete() txid =", "'internal server error') finally: covert_server.stop() for c in self.clients: c.addjob(clientjob_goodbye,", "we did this round_time = round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail", "More than one blame per proof is malicious. 
Boot client", "= ResultsCollector(len(self.clients), done_on_fail = False) def client_start(c, collector): with collector:", "tx_from_components, rand_position) from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx)", "relays in results: for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays:", "collector): with collector: c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points = [b.get_R() for", "# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "result would exceed 100 kB standard tx size limitation? max_clients", "towards any particular tier with self.lock: if self.stopping: return #", "reason we try to # not reference self.<variables> that may", "Software, # and to permit persons to whom the Software", "timeout=5) def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class", "dict of {component: contrib}, where contrib is (+- amount -", "after this point can report back to the # verifier,", "misconfigured?\") # This probably indicates misconfiguration since fusion server ought", "port. - Before start of covert components phase, call start_components.", "put into the waiting pools. Once a Fusion thread is", "a pool started. 
start_ev = threading.Event() client.start_ev = start_ev if", "and this permission notice shall be # included in all", "client.tags: ts = self.tags[t] ts.pool += 1 if len(self.pool) ==", "- a lightweight Ergon client # CashFusion - an advanced", "for both inputs and outputs lists overhead = 58 min_excess_fee", "0: client.error(\"No tiers\") if len(msg.tags) > 5: client.error(\"Too many tags\")", "input_indices = tx_from_components(all_components, session_hash) sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache =", "outputs lists overhead = 62 elif min_safe_clients * num_components >=", "'all_') def __init__(self): self.pool = 0 self.all_ = 0 class", "proofs = msg.encrypted_proofs if len(proofs) != Params.num_components: client.error(\"wrong number of", "// (num_components * 173) # Every round, clients leave ...", "(session hash) ) if min_safe_clients * num_components >= 2 *", "this client to waiting pools for pool in mytierpools.values(): res", "\"\"\"Basic thread per connected client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT): submsg,", "Params.num_components # calculate the randomly chosen destinations, same way as", "to print a lot of logs noisy = False #", "threading.Event() self.lock = threading.Lock() self.results = [] self.fails = []", "send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash from .protocol import Protocol from", "self.print_error(f\"player indicated bad input but checking failed with exception {repr(e)}", "client.error(\"seed did not match commitment\") proofs = msg.encrypted_proofs if len(proofs)", "self.clients] for i, (commit, ci, cj) in enumerate(commitment_master_list): client_commit_indexes[ci][cj] =", "_ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME *", "mtype def send(self, submsg, timeout=None): send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout) def", "person # obtaining 
a copy of this software and associated", "delay -- after starting any fusion, wait this long before", "tx.is_complete() txid = tx.txid() self.print_error(\"completed the transaction! \" + txid)", "either inputs or outputs lists overhead = 60 else: #", "calling this try: self.pool.remove(client) except KeyError: in_pool = False try:", "while if many # checks against blockchain need to be", "pool in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if self.tier_best in mytierpools:", "i in spenders) != 1: bad_inputs.update(spenders) if bad_inputs: bad_components.update(input_indices[i] for", "self.print_error(f\"ending covert signature acceptance. {missing_sigs} missing :{'(' if missing_sigs else", "= self.results del self.results return ret def add(self, result): with", "def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT): for client in self.clients:", "match commitment\") proofs = msg.encrypted_proofs if len(proofs) != Params.num_components: client.error(\"wrong", "*, deadline): remtime = deadline - time.monotonic() self.done_ev.wait(max(0., remtime)) with", "self.sendall(pb.FusionBegin(tier = self.tier, covert_domain = annhost_b, covert_port = annport, covert_ssl", "for i, proof in enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N,", "in-place sort by source commitment idx removes ordering correlations about", "At this point we could blame the originator, however #", "for no-longer-used tags del self.tags[t] return True def try_move_from_queue(self): #", "config self.network = network self.announcehost = announcehost self.donation_address = donation_address", "c in self.clients if not c.dead] if len(live) < Params.min_safe_clients:", "for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)): client.addjob(client_get_blames, idx, proofs,", "inputs or outputs lists overhead = 60 else: # the", ">= t.maxsimul: break else: self._add_pool(client) moved.append(client) 
for client in moved:", "many fuses. client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag in", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "be best anymore. self.reset_timer() raise class ResultsCollector: # Collect submissions", "repeatedly run rounds until successful or exception while True: covert_server.reset()", "t in mytiers: pool = mytierpools[t] pool.add(client) if len(pool.pool) >=", "def __init__(self): self.pool = 0 self.all_ = 0 class WaitingPool:", "tfill_thresh = tnow - Params.start_time_max for t, pool in mytierpools.items():", "attempt to move clients from queue into pool moved =", "Fusion thread is started, the ClientThreads are passed over to", "# minimum number of pool clients to trigger setting fill_time", "import fusion_pb2 as pb from .comms import send_pb, recv_pb, ClientHandlerThread,", "head start in relaying, before sharing the # signatures. This", "__exit__(self, exc_type, exc_value, traceback): if exc_type is not None: self.fails.append(exc_value)", "ret def start_signatures(self, sighashes, pubkeys): num_inputs = len(sighashes) assert num_inputs", "after calling this try: self.pool.remove(client) except KeyError: in_pool = False", "messages: # - how long from first connection to last", "= c.recv('playercommit') commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components) newhashes =", "announcehost = None): super().__init__(name=\"FusionController\") self.network = network self.tier = tier", "src_commitment_idx, dest_client_idx, dest_key_idx in relays: proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client)) live_clients", "= ResultsCollector(live_clients, done_on_fail = False) def client_get_blames(client, myindex, proofs, collector):", "don't punish him # because he's the honest guy and", "defaultdict(TagStatus) # how are the various tags self.fill_threshold = fill_threshold", "len([s for s in signatures if s is 
None]) ###", "# to have a good connection to the EC server.", "honest guy and all the other components were # just", "try: mytiers = list(mytierpools) rng.shuffle(mytiers) # shuffle the adding order", "def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT): send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout) def send_error(self,", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "sig, sighash): raise ValidationError('bad transaction signature') if existing_sig: # We", "if self.announcehost is None else self.announcehost annhost_b = annhost.encode('ascii') annport", "minimum (for privacy)? min_safe_clients = 6 # Choose the minimum", "self.queue.remove(client) except ValueError: return False else: in_pool = True if", "the favoured fuse: - Out of the pool(s) with the", "== self.tier_best: # this is the favoured pool, can start", "= self.round_pubkey feerate = self.feerate _ = self.components except AttributeError:", "check_covert_component(msg, round_pubkey, feerate) with self.lock: try: self.components[msg.component] = (sort_key, contrib)", "[contrib for comp, (sort_key, contrib) in component_master_list] del component_master_list #", "any particular tier with self.lock: if self.stopping: return # add", "\"\"\" with self.lock: time_best = None tier_best = None size_best", "time_best = ft tier_best = t size_best = size if", "FusionController to run the rounds.\"\"\" def __init__(self, config, network, bindhost,", "are filled then there is no favoured fuse. (since fill", "if time_best is None or ft < time_best or size", "copies or substantial portions of the Software. 
# # THE", "client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for tag in msg.tags: if", "self.print_error(\"problem detected: excess fee mismatch\") self.last_hash = session_hash = calc_round_hash(self.last_hash,", "x,y,z, _ in proofs])) msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT", ") if min_safe_clients * num_components >= 2 * 0xfc: #", "rounds until successful or exception while True: covert_server.reset() # Clean", "submissions. How it works: - Launch the server at any", "end_signatures(self): with self.lock: ret = self.signatures del self.signatures return ret", "do we need as an absolute minimum (for privacy)? min_safe_clients", "# an anti-spam measure we only allow one submission per", "cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit for commit,ci,cj in", "is fine # since it might be a resubmission after", "started at this tier self.queue = list() # clients who", "+ len(getattr(self, 'results', ())) >= self.num_results: self.done_ev.set() def gather(self, *,", "= donation_address )) # We allow a long timeout for", "for s in E12] # How many clients do we", "# signed, then it's malicious behaviour! if sum((signatures[i] is not", "None for t in client.tags: ts = self.tags[t] ts.all_ -=", "can share same tag (in pool and queue) def check_add(self,", "time is a float, this will almost always be unique)", "Kick off the fusion. rng.shuffle(chosen_clients) fusion = FusionController(self. network, tier,", "re-signing one of their inputs. time.sleep(2) self.sendall(pb.FusionResult(ok = True, txsignatures", "see if we even have reasonable # privacy with what", "2: # Sanity check for testing -- the proof sharing", "of the pool(s) with the most number of players, -", "return self[0] class TagStatus: __slots__ = ('pool', 'all_') def __init__(self):", "(C) 2020 <NAME> # # Permission is hereby granted, free", "one (unless hit max time or pool is full). 
start_time_spacing", "= True self.print_error(\"problem detected: excess fee mismatch\") self.last_hash = session_hash", "= None # when did pool exceed fill_threshold self.tag_max =", "one round's component submission to the next round's component submission?", "c.send(pb.StartRound(round_pubkey = round_pubkey, blind_nonce_points = [b.get_R() for b in c.blinds],", "fill time is a float, this will almost always be", "round_pubkey, blind_nonce_points = [b.get_R() for b in c.blinds], server_time =", "preliminary checks to see whether we should just skip the", "hit max time or pool is full). start_time_spacing = 120", "self.pool = set() # clients who will be put into", "plenty of time raise FusionError(\"way too slow\") time.sleep(remtime) signatures =", "def error(self, msg): self.send_error(msg) raise FusionError(f'Rejected client: {msg}') class CovertServer(GenericServer):", "valid signature') with self.lock: try: self.signatures[msg.which_input] = sig except AttributeError:", "in the same fuse? ip_max_simul_fuse = 3 # Guaranteed time", "else: self.queue.append(client) return can_pool def remove(self, client): # make sure", "CPU power. 
if sig != existing_sig: if not schnorr.verify(pubkey, sig,", "int(num_results) self.done_on_fail = bool(done_on_fail) self.done_ev = threading.Event() self.lock = threading.Lock()", "schnorr from oregano.address import Address from oregano.util import PrintError, ServerError,", "input but checking failed with exception {repr(e)} ({outpoint})\") else: self.print_error(f\"player", "round(time.time()) collector = ResultsCollector(len(self.clients), done_on_fail = False) def client_start(c, collector):", "upnp) self.round_pubkey = None def start_components(self, round_pubkey, feerate): self.components =", "t in client.tags: ts = self.tags.get(t) if ts is not", "= statuses)) start_ev.wait(2) except: # Remove client from waiting pools", "comp, (sort_key, contrib) in component_master_list] component_contribs = [contrib for comp,", "# Event for signalling us that a pool started. start_ev", "bit generous with the timeout but that's OK. self.sendall(pb.AllCommitments(initial_commitments =", "== 1: continue self.print_error(f\"multi-spend of f{prevout} detected\") # If exactly", "client's commitments). myindex = self.clients.index(client) possible_commitment_destinations = [(ci,cj) for commit,", "an anti-spam measure we only allow one submission per connection", "import (check_playercommit, check_covert_component, validate_blame, ValidationError, check_input_electrumx) # Resistor \"E series\"", "if the pool has stayed at or above min_clients for", "long from one round's component submission to the next round's", "[round(b*s) for b in [10000, 100000, 1000000, 10000000, 100000000] for", "to pools, which may have changed the favoured tier self.reset_timer()", "more time. 
# Since nothing after this point can report", "overhead = 58 min_excess_fee = (overhead + min_safe_clients - 1)", "Params.min_excess_fee, max_excess_fee = Params.max_excess_fee, tiers = Params.tiers, donation_address = donation_address", "selected: {t}\") try: mytiers = list(mytierpools) rng.shuffle(mytiers) # shuffle the", "for t, pool in mytierpools.items(): if pool.remove(client): pool.try_move_from_queue() if self.tier_best", "\"\"\" with self.lock: chosen_clients = list(self.waiting_pools[tier].pool) # Notify that we", "wrong time') else: assert mtype == 'signature' try: sighash =", "probably indicates misconfiguration since fusion server ought # to have", "schnorr.has_fast_verify(): raise RuntimeError(\"Fusion requires libsecp256k1\") super().__init__(bindhost, port, ClientThread, upnp =", "same tag on a given tier (if more try to", "traceback from collections import defaultdict import oregano.schnorr as schnorr from", "E12] # How many clients do we want before starting", "is (+- amount - fee). - Before start of covert", "client not in pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool),", "pb.CovertMessage, *expected_msg_names, timeout=timeout) return submsg, mtype def send(self, submsg, timeout=None):", "proof proofs.sort(key = lambda x:x[1]) client.send(pb.TheirProofsList(proofs = [ dict(encrypted_proof=x, src_commitment_idx=y,", "_,_,f in results) # Generate scrambled commitment list, but remember", "any fusion, wait this long before starting the next one", "the Software is furnished to do so, # subject to", "= Params.max_excess_fee, tiers = Params.tiers, donation_address = donation_address )) #", "have added to pools, which may have changed the favoured", "> 0: time.sleep(remtime) # Upload the full commitment list; we're", "time.sleep(Protocol.WARMUP_TIME) # repeatedly run rounds until successful or exception while", "tag_max): self.pool = set() # clients who will be put", "server implementation for CashFusion. 
Does not natively offer SSL support,", "covert signature acceptance\") tx, input_indices = tx_from_components(all_components, session_hash) sighashes =", "back to clients # as an 'internal server error'. raise", "any time. By default, will bind to an ephemeral port.", "signature') with self.lock: try: self.signatures[msg.which_input] = sig except AttributeError: client.error('signature", "Does not natively offer SSL support, however a server admin", "__init__(self, config, network, bindhost, port, upnp = None, announcehost =", "PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE", "genesis_hash, we # do indeed disallow them connecting if they", "self.waiting_pools.items(): for c in chosen_clients: pool.remove(c) pool.try_move_from_queue() # Update timing", "try: self.components[msg.component] = (sort_key, contrib) except AttributeError: client.error('component submitted at", "else ')'}\") # mark all missing-signature components as bad. bad_inputs", "publish, distribute, sublicense, and/or sell copies of the Software, #", "in too many fuses. client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)] for", "173) # Every round, clients leave ... How many clients", "fine # since it might be a resubmission after ack", "timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME) # More than one blame", "doing this. remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic() if", "= None, announcehost = None): super().__init__(name=\"FusionController\") self.network = network self.tier", "' + reason) continue except Exception as e: self.print_error(f\"player indicated", "their pool. msg = client.recv('joinpools', timeout=120) if len(msg.tiers) == 0:", "port, ClientThread, upnp = upnp) self.config = config self.network =", "the connection as a result. # Note that we could", "# privacy with what we have. 
bad_components = set() ###", "def reset(self): try: del self.round_pubkey del self.components del self.feerate except", "upnp = upnp) self.round_pubkey = None def start_components(self, round_pubkey, feerate):", "False else: if len(self.fails) + len(self.results) >= self.num_results: self.done_ev.set() return", "# timing of a given input's signature submission. raise ValidationError('conflicting", "self.fails.append(exc_value) if self.done_on_fail: self.done_ev.set() elif len(self.fails) + len(getattr(self, 'results', ()))", "destination. proofs_to_relay = [list() for _ in self.clients] for src_client,", "signatures for c in self.clients: scalars = [b.sign(covert_priv, e) for", "# do indeed disallow them connecting if they are e.g.", "if we even have reasonable # privacy with what we", "is None) # further, search for duplicated inputs (through matching", "c in self.clients: c.addjob(clientjob_goodbye, None) self.clients = [] # gc", "_co in range(Params.num_components)] lock = threading.Lock() seen_salthashes = set() #", "'ban score' to the player. 
# we aren't collecting any", "2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2] E24 =", "time_best is None: self.tier_best_starttime = None else: self.tier_best_starttime = max(time_best", "): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type", "inputs and outputs lists overhead = 58 min_excess_fee = (overhead", "5.6, 6.8, 8.2] E24 = [1.0, 1.1, 1.2, 1.3, 1.5,", "time') else: assert mtype == 'signature' try: sighash = self.sighashes[msg.which_input]", "for commit,ci,cj in commitment_master_list) # Send blind signatures for c", "i, proof in enumerate(proofs): dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)]", "input but it was fine ({outpoint})\") # At this point", "E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0,", "= [] # gc def kick_missing_clients(self, goodclients, reason = None):", "we could blame the originator, however # blockchain checks are", "client.error('component submitted at wrong time') sort_key, contrib = check_covert_component(msg, round_pubkey,", "valid signature. This is not # allowed and we break", "if len(self.pool) == self.fill_threshold: self.fill_time = time.monotonic() def add(self, client):", "{blame.blame_reason!r})') continue if isinstance(ret, str): self.print_error(f\"verified a bad proof (for", "find out the # timing of a given input's signature", "sharing thing doesn't even make sense with one player. for", "i in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()]", "in mytierpools.items(): if client not in pool.pool: continue status =", "ClientThread(ClientHandlerThread): \"\"\"Basic thread per connected client.\"\"\" def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT):", "can_pool def remove(self, client): # make sure to call try_move_from_queue()", "one player. 
for c in self.clients: c.kill('blame yourself!') return #", "= begin_time)) self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time) time.sleep(Protocol.WARMUP_TIME)", "and queue) def check_add(self, client): for t in client.tags: ts", "selected tier. \"\"\" with self.lock: chosen_clients = list(self.waiting_pools[tier].pool) # Notify", "there is no favoured fuse. (since fill time is a", "AttributeError: return False else: if len(self.fails) + len(self.results) >= self.num_results:", "self.tags[t] return True def try_move_from_queue(self): # attempt to move clients", "= deadline - time.monotonic() self.done_ev.wait(max(0., remtime)) with self.lock: ret =", "start eventually remtime = pool.fill_time - tfill_thresh if t ==", "covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic() assert remtime > 0, \"timings", "_ in results] self.check_client_count() self.print_error(f\"got commitments from {len(self.clients)} clients (dropped", "timeout=timeout) def send_error(self, msg): self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT) def error(self,", "time.sleep(remtime) # Upload the full commitment list; we're a bit", "of covert signatures phase, owner calls end_signatures, which returns a", "return ret def add(self, result): with self.lock: try: self.results.append(result) except", "players; record the time we did this round_time = round(time.time())", "20: client.error(\"Tag id too long\") if not (0 < tag.limit", "OF OR IN # CONNECTION WITH THE SOFTWARE OR THE", "self.print_error(\"got bad blame; clamed reason was: \"+repr(blame.blame_reason)) client.kill(f'bad blame message:", "of our covert timeline. covert_T0 = time.monotonic() self.print_error(f\"startround sent at", "= None def start_components(self, round_pubkey, feerate): self.components = dict() self.feerate", "us that a pool started. 
start_ev = threading.Event() client.start_ev =", "-= 1 if in_pool: ts.pool -= 1 if ts.all_ ==", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "() def __new__(cls, ipstr, tagbytes, maxsimul): ipb = ipstr.encode() b", "time.monotonic() def add(self, client): can_pool = True for t in", "continue independently of waiting server. # self.spawned_clients.difference_update(chosen_clients) # Kick off", "not None: client.error(res) for t in mytiers: pool = mytierpools[t]", "a lightweight Ergon client # CashFusion - an advanced coin", "e: reason = f'{e.args[0]} ({outpoint})' self.print_error(f\"blaming[{src_commitment_idx}] for bad input: {reason}\")", "(inp, sig) in enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex() + '41'", "self.num_results: self.done_ev.set() return True class FusionController(threading.Thread, PrintError): \"\"\" This controls", "For now, msg.genesis_hash is optional and we tolerate it #", "deadline. def __init__(self, num_results, done_on_fail = True): self.num_results = int(num_results)", "upgrade\") if msg.genesis_hash: if msg.genesis_hash != get_current_genesis_hash(): # For now,", "will start eventually remtime = pool.fill_time - tfill_thresh if t", "1) // min_safe_clients # How many clients can share same", "were # just imposters who didn't have private key. If", "be present in too many fuses. client.tags = [ClientTag(client_ip, b'',", "we left from best pool, so it might not be", "The above copyright notice and this permission notice shall be", "self.results del self.results return ret def add(self, result): with self.lock:", "behaviour! 
if sum((signatures[i] is not None) for i in spenders)", "+ Protocol.BLAME_VERIFY_TIME * 2) self.sendall(pb.RestartRound()) class CovertClientThread(ClientHandlerThread): def recv(self, *expected_msg_names,", "dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z) for x,y,z, _ in proofs])) msg =", "this back to clients # as an 'internal server error'.", "None # when did pool exceed fill_threshold self.tag_max = tag_max", "from all pools for t, pool in self.waiting_pools.items(): for c", "= float('inf') while True: with self.lock: if self.stopping or start_ev.is_set():", "not collector.add((c, msg.initial_commitments, msg.excess_fee)): c.error(\"late commitment\") # record for later", "True def try_move_from_queue(self): # attempt to move clients from queue", "for i in range(len(tx.inputs()))] pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in", "of pool clients to trigger setting fill_time self.fill_time = None", "isinstance(self.donation_address, Address): donation_address = self.donation_address.to_full_ui_string() client.send(pb.ServerHello( num_components = Params.num_components, component_feerate", "favoured pool, can start at a special time remtime =", "for c in self.clients if not c.dead] if len(live) <", "(slow!) 
for c in self.clients: c.blinds = [schnorr.BlindSigner() for _co", "really shouldn't happen, we had plenty of time raise FusionError(\"way", "goodbye than killing if text is not None: client.send_error(text) raise", "{inp['pubkeys'][0]}\"].append(i) for prevout, spenders in prevout_spenders.items(): if len(spenders) == 1:", "tag.id, tag.limit)) try: mytierpools = {t: self.waiting_pools[t] for t in", "c.dead] if len(live) < Params.min_safe_clients: for c in live: c.kill(\"too", "enumerate(zip(tx.inputs(), signatures)): inp['signatures'][0] = sig.hex() + '41' assert tx.is_complete() txid", "not in pool.pool: continue status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players", "# If the blamed client is already dead, don't waste", "*expected_msg_names, timeout=timeout) return submsg, mtype def send(self, submsg, timeout=None): send_pb(self.connection,", "enumerate(results) for cj,commit in enumerate(commitments)] rng.shuffle(commitment_master_list) all_commitments = tuple(commit for", "based on dividing the overhead amongst players, in the smallest", "list and note where each client's commitments ended up client_commit_indexes", "as an 'internal server error'. raise else: self.print_error(\"broadcast was successful!\")", "len(self.clients) < 2: # Sanity check for testing -- the", "to # not reference self.<variables> that may change. 
for blame", "= self.sighashes[msg.which_input] pubkey = self.pubkeys[msg.which_input] existing_sig = self.signatures[msg.which_input] except AttributeError:", "if self.tier_best in mytierpools: # we left from best pool,", "myindex] N = len(possible_commitment_destinations) assert N == len(all_commitments) - Params.num_components", "self.tags[t] ts.all_ -= 1 if in_pool: ts.pool -= 1 if", "limit out of range\") ip = '' if tag.no_ip else", "configurable class Params: num_components = 23 component_feerate = 1000 #", "client.error(\"Too many tags\") # Event for signalling us that a", "tag\" def _add_pool(self, client): self.pool.add(client) for t in client.tags: ts", "in relaying, before sharing the # signatures. This makes it", "= 0 class WaitingPool: \"\"\" a waiting pool for a" ]
[ "df['not_at_all'] / 100 * df['sample_size'] return df def grafic4(label1, label2,", "valor2] # Fem la grafica plt.bar(eje_x, eje_y) # Llegenda de", "percentnotatall, \" % Persones\", \"Satisfacció\", \"% de persones preocupades o", "Mostrem Grafica plt.show() #Funcio per a l'excercici 4.4 def grades(df):", "Declaramos valors per l'eix x eje_x = [label1, label2] #", "pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre de persones preocupades o no", "grafiquem # Cal tancar el grafic per a seguir amb", "percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100 # Els printem", "plt.ylabel(leyenday) # Legenda en el eje x plt.xlabel(leyendax) # Títol", "# Finalment, grafiquem # Cal tancar el grafic per a", "/ 100 * df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] / 100 *", "amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre de", "en un altre df infectedf = filtrar(dataframe, \"infected\") # Calculem", "plt.title(titulo) # Mostrem Grafica plt.show() #Funcio per a l'excercici 4.4", "# Declaramos valors per l'eix x eje_x = [label1, label2]", "plt.show() #Funcio per a l'excercici 4.4 def grades(df): df['538 Grade']=df['538", "no per infected\") def printar(df, subject): # Printem a la", "per subject infected i ho desem en un altre df", "de persones preocupades o no per l'economia\") def filtrar(dataframe, subject1):", "l'economia\") def filtrar(dataframe, subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim", "/ 100 * df['sample_size'] return df def grafic4(label1, label2, valor1,", "percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100", "per l'economia\") def filtrar(dataframe, subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() #", "= df['not_at_all'] / 100 * 
df['sample_size'] return df def grafic4(label1,", "ho desem en un altre df infectedf = filtrar(dataframe, \"infected\")", "\"infected\") # Calculem els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall =", "Els printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All',", "subject infected i ho desem en un altre df infectedf", "df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot)) # Finalment, grafiquem", "subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els valors en", "del samplesize a dues noves columnes df['ppl_very'] = df['very'] /", "Fem la grafica plt.bar(eje_x, eje_y) # Llegenda de l'eix x", "grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \" % Persones\", \"Satisfacció\", \"% de", "leyenday, leyendax, titulo): # Declaramos valors per l'eix x eje_x", "print(\"Not at All: {}\".format(pplnot)) # Finalment, grafiquem # Cal tancar", "def printar(df, subject): # Printem a la consola els valors", "els valors en funció del samplesize a dues noves columnes", "per l'eix x eje_x = [label1, label2] # Declaramos valors", "/ infectedf['sample_size'].sum()) * 100 # Els printem print(\"percentatge very: {}%\".format(percentvery))", "Cal tancar el grafic per a seguir amb l'execució grafic4('People_Very',", "el eje x plt.xlabel(leyendax) # Títol de Grafica plt.title(titulo) #", "= (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100 # Els printem print(\"percentatge", "preocupades o no per l'economia\") def filtrar(dataframe, subject1): df =", "# Filtrem i tractem el dataset economydf = filtrar(dataframe, \"economy\")", "eje_x = [label1, label2] # Declaramos valors per l'eix y", "grafic per a seguir amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, 
pplnot,", "df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot))", "[valor1, valor2] # Fem la grafica plt.bar(eje_x, eje_y) # Llegenda", "infected\") def printar(df, subject): # Printem a la consola els", "(infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100 # Els printem print(\"percentatge very:", "l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre de persones", "100 * df['sample_size'] return df def grafic4(label1, label2, valor1, valor2,", "un altre df infectedf = filtrar(dataframe, \"infected\") # Calculem els", "# Legenda en el eje x plt.xlabel(leyendax) # Títol de", "Títol de Grafica plt.title(titulo) # Mostrem Grafica plt.show() #Funcio per", "els valors print(\"Valors per subject {}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot", "# Afegim els valors en funció del samplesize a dues", "tancar el grafic per a seguir amb l'execució grafic4('People_Very', 'People_Not_At_All',", "def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo): # Declaramos", "# Filtrem ara per subject infected i ho desem en", "ara per subject infected i ho desem en un altre", "infected i ho desem en un altre df infectedf =", "altre df infectedf = filtrar(dataframe, \"infected\") # Calculem els percentatjes", "filtrar(dataframe, \"economy\") # el printem printar(economydf, subject) # Filtrem ara", "eje_y = [valor1, valor2] # Fem la grafica plt.bar(eje_x, eje_y)", "Persones\", \"Satisfacció\", \"% de persones preocupades o no per infected\")", "o no per infected\") def printar(df, subject): # Printem a", "preocupades o no per infected\") def printar(df, subject): # Printem", "x eje_x = [label1, label2] # Declaramos valors per l'eix", "subject) # Filtrem ara per subject infected i ho desem", "= filtrar(dataframe, \"economy\") # el printem printar(economydf, subject) # Filtrem", "titulo): # Declaramos 
valors per l'eix x eje_x = [label1,", "Filtrem i tractem el dataset economydf = filtrar(dataframe, \"economy\") #", "noves columnes df['ppl_very'] = df['very'] / 100 * df['sample_size'] df['ppl_not_at_all']", "100 # Els printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall))", "import matplotlib.pyplot as plt def countvalues(dataframe, subject): # Filtrem i", "percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) *", "df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size'] return df", "plt.xlabel(leyendax) # Títol de Grafica plt.title(titulo) # Mostrem Grafica plt.show()", "print(\"Very: {}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot)) # Finalment, grafiquem #", "* 100 # Els printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all:", "# Calculem els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum()", "tractem el dataset economydf = filtrar(dataframe, \"economy\") # el printem", "df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size'] return df def", "la grafica plt.bar(eje_x, eje_y) # Llegenda de l'eix x plt.ylabel(leyenday)", "el grafic per a seguir amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery,", "* df['sample_size'] return df def grafic4(label1, label2, valor1, valor2, leyenday,", "o no per l'economia\") def filtrar(dataframe, subject1): df = dataframe[dataframe['subject'].str.contains(subject1,", "leyendax, titulo): # Declaramos valors per l'eix x eje_x =", "Llegenda de l'eix x plt.ylabel(leyenday) # Legenda en el eje", "valors en funció del samplesize a dues noves columnes df['ppl_very']", "\"Satisfacció\", \"Nombre de persones preocupades o no per l'economia\") def", 
"per a seguir amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\",", "\"Nombre de persones preocupades o no per l'economia\") def filtrar(dataframe,", "subject {}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery))", "plt.bar(eje_x, eje_y) # Llegenda de l'eix x plt.ylabel(leyenday) # Legenda", "a la consola els valors print(\"Valors per subject {}\".format(subject)) pplvery", "l'eix x eje_x = [label1, label2] # Declaramos valors per", "pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot)) #", "per l'eix y eje_y = [valor1, valor2] # Fem la", "\"Persones\", \"Satisfacció\", \"Nombre de persones preocupades o no per l'economia\")", "matplotlib.pyplot as plt def countvalues(dataframe, subject): # Filtrem i tractem", "# Títol de Grafica plt.title(titulo) # Mostrem Grafica plt.show() #Funcio", "a seguir amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\",", "i ho desem en un altre df infectedf = filtrar(dataframe,", "el printem printar(economydf, subject) # Filtrem ara per subject infected", "plt def countvalues(dataframe, subject): # Filtrem i tractem el dataset", "eje x plt.xlabel(leyendax) # Títol de Grafica plt.title(titulo) # Mostrem", "case=False)].copy() # Afegim els valors en funció del samplesize a", "printar(economydf, subject) # Filtrem ara per subject infected i ho", "Printem a la consola els valors print(\"Valors per subject {}\".format(subject))", "infectedf['sample_size'].sum()) * 100 # Els printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge", "valor2, leyenday, leyendax, titulo): # Declaramos valors per l'eix x", "x plt.ylabel(leyenday) # Legenda en el eje x plt.xlabel(leyendax) #", "consola els valors print(\"Valors per subject {}\".format(subject)) pplvery = df['ppl_very'].sum()", "def filtrar(dataframe, 
subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els", "# Llegenda de l'eix x plt.ylabel(leyenday) # Legenda en el", "{}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not", "'People_Not_At_All', percentvery, percentnotatall, \" % Persones\", \"Satisfacció\", \"% de persones", "very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \"", "print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall,", "samplesize a dues noves columnes df['ppl_very'] = df['very'] / 100", "df def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo): #", "# Mostrem Grafica plt.show() #Funcio per a l'excercici 4.4 def", "df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els valors en funció", "pplnot, \"Persones\", \"Satisfacció\", \"Nombre de persones preocupades o no per", "label2, valor1, valor2, leyenday, leyendax, titulo): # Declaramos valors per", "{}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \" % Persones\", \"Satisfacció\", \"%", "{}\".format(pplnot)) # Finalment, grafiquem # Cal tancar el grafic per", "= [label1, label2] # Declaramos valors per l'eix y eje_y", "Declaramos valors per l'eix y eje_y = [valor1, valor2] #", "dataset economydf = filtrar(dataframe, \"economy\") # el printem printar(economydf, subject)", "valors per l'eix y eje_y = [valor1, valor2] # Fem", "printem printar(economydf, subject) # Filtrem ara per subject infected i", "no per l'economia\") def filtrar(dataframe, subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy()", "# Declaramos valors per 
l'eix y eje_y = [valor1, valor2]", "= [valor1, valor2] # Fem la grafica plt.bar(eje_x, eje_y) #", "# el printem printar(economydf, subject) # Filtrem ara per subject", "subject): # Filtrem i tractem el dataset economydf = filtrar(dataframe,", "per subject {}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very:", "[label1, label2] # Declaramos valors per l'eix y eje_y =", "pplvery = df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at", "= df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at All:", "return df def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo):", "a dues noves columnes df['ppl_very'] = df['very'] / 100 *", "de Grafica plt.title(titulo) # Mostrem Grafica plt.show() #Funcio per a", "de persones preocupades o no per infected\") def printar(df, subject):", "= df['ppl_not_at_all'].sum() print(\"Very: {}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot)) # Finalment,", "subject): # Printem a la consola els valors print(\"Valors per", "valors per l'eix x eje_x = [label1, label2] # Declaramos", "Grafica plt.title(titulo) # Mostrem Grafica plt.show() #Funcio per a l'excercici", "printar(df, subject): # Printem a la consola els valors print(\"Valors", "\"economy\") # el printem printar(economydf, subject) # Filtrem ara per", "per infected\") def printar(df, subject): # Printem a la consola", "seguir amb l'execució grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre", "dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els valors en funció del samplesize", "en el eje x plt.xlabel(leyendax) # Títol de Grafica plt.title(titulo)", "<gh_stars>0 import matplotlib.pyplot as plt def countvalues(dataframe, subject): # Filtrem", "= (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 
percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100 #", "as plt def countvalues(dataframe, subject): # Filtrem i tractem el", "\"% de persones preocupades o no per infected\") def printar(df,", "label2] # Declaramos valors per l'eix y eje_y = [valor1,", "Filtrem ara per subject infected i ho desem en un", "All: {}\".format(pplnot)) # Finalment, grafiquem # Cal tancar el grafic", "infectedf = filtrar(dataframe, \"infected\") # Calculem els percentatjes percentvery =", "grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo): # Declaramos valors", "\"Satisfacció\", \"% de persones preocupades o no per infected\") def", "printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery,", "df['ppl_very'] = df['very'] / 100 * df['sample_size'] df['ppl_not_at_all'] = df['not_at_all']", "i tractem el dataset economydf = filtrar(dataframe, \"economy\") # el", "desem en un altre df infectedf = filtrar(dataframe, \"infected\") #", "valors print(\"Valors per subject {}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot =", "df['very'] / 100 * df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] / 100", "economydf = filtrar(dataframe, \"economy\") # el printem printar(economydf, subject) #", "Legenda en el eje x plt.xlabel(leyendax) # Títol de Grafica", "print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \" % Persones\",", "# Fem la grafica plt.bar(eje_x, eje_y) # Llegenda de l'eix", "la consola els valors print(\"Valors per subject {}\".format(subject)) pplvery =", "df['sample_size'] return df def grafic4(label1, label2, valor1, valor2, leyenday, leyendax,", "print(\"Valors per subject {}\".format(subject)) pplvery = df['ppl_very'].sum() pplnot = df['ppl_not_at_all'].sum()", "y eje_y = [valor1, valor2] # Fem la 
grafica plt.bar(eje_x,", "'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre de persones preocupades o", "columnes df['ppl_very'] = df['very'] / 100 * df['sample_size'] df['ppl_not_at_all'] =", "% Persones\", \"Satisfacció\", \"% de persones preocupades o no per", "Grafica plt.show() #Funcio per a l'excercici 4.4 def grades(df): df['538", "# Els printem print(\"percentatge very: {}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very',", "percentvery, percentnotatall, \" % Persones\", \"Satisfacció\", \"% de persones preocupades", "de l'eix x plt.ylabel(leyenday) # Legenda en el eje x", "= filtrar(dataframe, \"infected\") # Calculem els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100", "dues noves columnes df['ppl_very'] = df['very'] / 100 * df['sample_size']", "grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, \"Persones\", \"Satisfacció\", \"Nombre de persones preocupades", "filtrar(dataframe, subject1): df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els valors", "l'eix x plt.ylabel(leyenday) # Legenda en el eje x plt.xlabel(leyendax)", "countvalues(dataframe, subject): # Filtrem i tractem el dataset economydf =", "* df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size'] return", "eje_y) # Llegenda de l'eix x plt.ylabel(leyenday) # Legenda en", "{}%\".format(percentvery)) print(\"percentatge not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \" %", "= df['very'] / 100 * df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] /", "(infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100 # Els", "Afegim els valors en funció del samplesize a dues noves", "persones preocupades o no per infected\") 
def printar(df, subject): #", "{}\".format(pplvery)) print(\"Not at All: {}\".format(pplnot)) # Finalment, grafiquem # Cal", "persones preocupades o no per l'economia\") def filtrar(dataframe, subject1): df", "en funció del samplesize a dues noves columnes df['ppl_very'] =", "per a l'excercici 4.4 def grades(df): df['538 Grade']=df['538 Grade'].str[0] print(df.groupby('538", "# Cal tancar el grafic per a seguir amb l'execució", "a l'excercici 4.4 def grades(df): df['538 Grade']=df['538 Grade'].str[0] print(df.groupby('538 Grade').size())", "Finalment, grafiquem # Cal tancar el grafic per a seguir", "funció del samplesize a dues noves columnes df['ppl_very'] = df['very']", "\" % Persones\", \"Satisfacció\", \"% de persones preocupades o no", "= dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy() # Afegim els valors en funció del", "x plt.xlabel(leyendax) # Títol de Grafica plt.title(titulo) # Mostrem Grafica", "valor1, valor2, leyenday, leyendax, titulo): # Declaramos valors per l'eix", "grafica plt.bar(eje_x, eje_y) # Llegenda de l'eix x plt.ylabel(leyenday) #", "Calculem els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum() /", "not_at_all: {}%\".format(percentnotatall)) grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, \" % Persones\", \"Satisfacció\",", "df infectedf = filtrar(dataframe, \"infected\") # Calculem els percentatjes percentvery", "filtrar(dataframe, \"infected\") # Calculem els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall", "# Printem a la consola els valors print(\"Valors per subject", "def countvalues(dataframe, subject): # Filtrem i tractem el dataset economydf", "els percentatjes percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100 percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum())", 
"at All: {}\".format(pplnot)) # Finalment, grafiquem # Cal tancar el", "#Funcio per a l'excercici 4.4 def grades(df): df['538 Grade']=df['538 Grade'].str[0]", "el dataset economydf = filtrar(dataframe, \"economy\") # el printem printar(economydf,", "100 * df['sample_size'] df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size']", "l'eix y eje_y = [valor1, valor2] # Fem la grafica" ]
[ "EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib = rule(", "= [script], command = \"\"\"\\ {script} \\ pandoc -s --gladtex", "+ \".png\") figures += [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell(", "compile\", ), \"deps\": attr.label_list( doc = \"The dependencies, any targets", "= \"Transform a graphviz dot file into png using dot\",", "\"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform an", "src_copy.short_path), outputs = [src_copy], inputs = [src], command=\"cp {} {}\".format(src.path,", "the targets you need to make this book work.\", providers", "return src_copy def _markdown_lib_impl(ctx): markdowns = [] for target in", "[]) markdowns += (provider.markdowns or []) runfiles = ctx.runfiles(files=figures+markdowns) for", "= \"Transform a graphviz dot file into png using neato\",", "\"Transform a timing diagram file into png using drawtiming\", )", "figures += [out_file] script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message =", "the directory dir_reference, not the # directory where the build", "attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform a graphviz", "epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub])", "pandoc --epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd,", "dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]),", "doc = \"The markdown source files\", ), \"deps\": attr.label_list( doc", "= markdowns, outputs = [htex_file], tools = [script], command =", 
"runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf =", "to use for this book\", ), \"metadata_xml\": attr.label( allow_files =", "ctx.actions.run_shell( progress_message = \"Building EPUB for: {}\".format(name), inputs = inputs,", "= \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ -f html", "script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles =", "files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf = rule( implementation = _ebook_pdf_impl,", "] ebook_pdf = rule( implementation = _ebook_pdf_impl, attrs = {", "dot\", ) def _asymptote_impl(ctx): asycc = ctx.executable._script figures = []", "where the build happens! This is needed because we can", "is gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx,", "in ctx.attr.deps: provider = target[EbookInfo] figures += (provider.figures or [])", "in_file.path) ctx.actions.run_shell( progress_message = \"ASY to PNG: {0}\".format(in_file.short_path), inputs =", "= [\".md\"], doc = \"The markdown source files\", ), \"deps\":", "dir_reference = epub_file script = ctx.executable._script name = ctx.label.name script_cmd", "for: {}\".format(name), inputs = markdowns, outputs = [htex_file], tools =", "-t epub3 -o {ebook_epub} {html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),", "--output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps", "for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo],", "= \"The epub-metadata.xml file to use for this book\", ),", "= ctx.label.name # steps 
# run htex on all *md,", "-o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps", "[outdir], outputs = [outdir_tar], command = tar_command, ) # run", "= runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ]", "runfiles = ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)", "{script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf} {markdowns}", "# First provider is EbookInfo, second is DefaultInfo. (ebook_info, default_info)", "epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx,", "= markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for file", "default_info.files.to_list() epub_file = outputs[0] equation_outdir = outputs[1] equation_outdir_tar = outputs[2]", "(provider.markdowns or []) runfiles = ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps:", "markdowns += [_copy_file_to_workdir(ctx, src)] figures = [] for target in", "epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file]) for dep", "ctx.actions.run_shell( progress_message = \"graphviz to PNG with {1}: {0}\".format(in_file.short_path, cmd),", "{target} {sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) ) #", "any targets should be allowed\", ), \"output\": attr.output(doc=\"The generated file\"),", "in target.files.to_list(): in_file = src out_file = ctx.actions.declare_file(in_file.basename + \".png\")", "fun! 
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml =", "_ebook_epub_impl(ctx): name = ctx.label.name # This is duplicated in _ebook_pdf_impl.", "= [src], command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy def", "not needed. tar_command = \"(cd {base} ; tar xvf {archive})", "epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file]) for dep in", "dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file,", "provider(fields=[\"figures\", \"markdowns\"]) # Returns the docker_run script invocation command based", "[epub_metadata, title_yaml] + markdowns + figures ctx.actions.run_shell( progress_message = \"Building", "= ctx.label.name # This is duplicated in _ebook_pdf_impl. # steps", "using neato\", ) def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png =", "+= provider.markdowns figures += provider.figures dir_reference = markdowns[0] htex_file =", "set of markdown files\", attrs = { \"srcs\": attr.label_list( allow_files", "ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is EbookInfo, second is DefaultInfo. (ebook_info,", "= { \"srcs\": attr.label_list( allow_files = [\".dot\"], doc = \"The", "in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub,", "attrs = { \"deps\": attr.label_list( doc = \"All the targets", "\"Copying {} to {}\".format(src.short_path, src_copy.short_path), outputs = [src_copy], inputs =", "= _script_cmd(script.path, dir_reference.path) # run htexepub to obtain book.epub. 
#", "\"deps\": attr.label_list( doc = \"The file to compile\", providers =", "equation environments for: {}\".format(name), inputs = markdowns, outputs = [htex_file],", "asycc = ctx.executable._script figures = [] for target in ctx.attr.srcs:", "targets should be allowed\", ), \"output\": attr.output(doc=\"The generated file\"), \"_script\":", "tar cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message =", "= target[EbookInfo] if not ebook_provider: continue deps += ebook_provider.figures runfiles", "= _drawtiming_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".t\"],", "gladtex on the resulting htex to obtain html and output", "{}\".format(src.short_path), outputs = [src_copy], inputs = [src], command=\"cp {} {}\".format(src.path,", "{htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) )", "to invoke # dir_reference: (string) The path to a file", "allow_files = [\".asy\"], doc = \"The file to compile\", ),", "the # directory where the build happens! 
This is needed", "dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo(", "ctx.actions.run_shell( progress_message = \"ASY to PNG: {0}\".format(in_file.short_path), inputs = [in_file],", "EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\")", "provider = target[EbookInfo] figures += (provider.figures or []) markdowns +=", "allow_files = [\".t\"], doc = \"The file to compile\", ),", "directory dir_reference, not the # directory where the build happens!", "doc = \"All the targets you need to make this", "doc = \"The file to compile\", ), \"deps\": attr.label_list( doc", "-s --gladtex -o {target} {sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\"", "title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name))", "_ebook_pdf_impl. 
# steps # run htex on all *md, gives", "run htex on all *md, gives book.htex markdowns = []", "= ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] +", "epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf =", "--mathml -o {ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference,", "attr.label( allow_files = True, doc = \"The title.yaml file to", "[htex_file], tools = [script], command = \"\"\"\\ {script} \\ pandoc", "\"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = [] for target", "= inputs, tools = [script], outputs = [ebook_epub], command =", "{}\".format(src.short_path, src_copy.short_path), outputs = [src_copy], inputs = [src], command=\"cp {}", "[ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, ) ] ebook_epub", ") def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is", "{base} ; tar xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path)", "ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building equation", "= \"Copying {} to {}\".format(src.short_path, src_copy.short_path), outputs = [src_copy], inputs", "runfiles=runfiles), ] asymptote = rule(implementation = _asymptote_impl, attrs = {", "= rule(implementation = _drawtiming_png_impl, attrs = { \"srcs\": attr.label_list( allow_files", "runfiles = ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)", "#CONTAINER = \"ebook-buildenv:local\" EbookInfo = 
provider(fields=[\"figures\", \"markdowns\"]) # Returns the", "such file outputs = default_info.files.to_list() epub_file = outputs[0] equation_outdir =", "*md, gives book.htex markdowns = [] figures = [] for", "dot_png = rule(implementation = _dot_png_impl, attrs = { \"srcs\": attr.label_list(", "command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy def _copy_file_to_workdir(ctx, src):", "+= (provider.markdowns or []) runfiles = ctx.runfiles(files=figures+markdowns) for dep in", "return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation = _neato_png_impl, attrs =", "continue deps += ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps) for dep in", "src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message = \"Copying {} to", "outdir_tar]), runfiles=runfiles, ) ] ebook_epub = rule( implementation = _ebook_epub_impl,", "[\".asy\"], doc = \"The file to compile\", ), \"deps\": attr.label_list(", "--cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ -f html -t epub3 -o", ") ] ebook_pdf = rule( implementation = _ebook_pdf_impl, attrs =", "\"Building MOBI for: {}\".format(name), inputs = [epub_file, equation_outdir], tools =", "title.yaml file to use for this book\", ), \"metadata_xml\": attr.label(", "if not ebook_provider: continue deps += ebook_provider.figures runfiles = ctx.runfiles(files", "doc = \"Generate an ebook in PDF format\" ) def", "[\".md\"], doc = \"The markdown source files\", ), \"deps\": attr.label_list(", "= \"Generate an ebook in PDF format\" ) def _ebook_kindle_impl(ctx):", "to {}\".format(src.short_path, src_copy.short_path), outputs = [src_copy], inputs = [src], command=\"cp", "= ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return", "for this book\", ), \"metadata_xml\": attr.label( allow_files = True, doc", "[out_file], tools = 
[asycc], command = \"\"\"\\ {script} \\ asy", "[] for target in ctx.attr.srcs: for src in target.files.to_list(): markdowns", "can not control # figure inclusion. markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns)", "attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform a timing", "in ctx.attr.deps: provider = dep[EbookInfo] markdowns += provider.markdowns figures +=", "EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png = rule(implementation = _drawtiming_png_impl,", "ctx.executable._script name = ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message", "+= ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps: runfiles", "+= [out_file] script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message = \"ASY", "an asymptote file into png\", ) def _copy_file_to_workdir_renamed(ctx, src): src_copy", "script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) ) # run gladtex on the", "+ markdowns + figures ctx.actions.run_shell( progress_message = \"Building PDF for:", "file into png using neato\", ) def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx,", "= \"(cd {base} ; tar cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename,", "src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path), outputs", "ebook-convert {epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path),", "\"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = []", "= \"Generate an ebook in EPUB format\" ) def 
_strip_reference_dir(reference_dir,", "markdown source files\", ), \"deps\": attr.label_list( doc = \"The file", "markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path,", "ebook in EPUB format\" ) def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\",", "for src in target.files.to_list(): in_file = src out_file = ctx.actions.declare_file(in_file.basename", "files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib = rule( implementation = _markdown_lib_impl,", "Please see the LICENSE # file at the root of", "tools = [script], command = \"\"\"\\ {script} --cd-to-dir-reference \\ gladtex", "for: {}\".format(name), inputs = [epub_file, equation_outdir], tools = [script], outputs", "ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar]", "ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml] + markdowns +", "png\", ) def _copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell(", "tools = [docker_run], command = \"\"\"\\ {script} \\ {cmd} -Tpng", "= \"\"\"\\ {script} \\ {cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd,", "is not needed. 
tar_command = \"(cd {base} ; tar xvf", "png -o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps", "ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path) # run htexepub to obtain", "= ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return", "= ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs", "[asycc], command = \"\"\"\\ {script} \\ asy -render 5 -f", "be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml", "-o {ebook_epub} {html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path),", "= { \"srcs\": attr.label_list( allow_files = [\".md\"], doc = \"The", "\".png\") figures += [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message", "\\ pandoc -s --gladtex -o {target} {sources} \\ \"\"\".format( script=script_cmd,", "is DefaultInfo. 
(ebook_info, default_info) = _ebook_epub_impl(ctx) # There can be", "runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ]", "src out_file = ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd", "diagram to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs = [in_file],", "= _ebook_epub_impl, attrs = { \"deps\": attr.label_list( doc = \"All", "to make this book work.\", providers = [EbookInfo], ), \"title_yaml\":", "target[EbookInfo] if not ebook_provider: continue deps += ebook_provider.figures runfiles =", "ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting equations for:", "_markdown_lib_impl(ctx): markdowns = [] for target in ctx.attr.srcs: for src", "the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use this for quick", "doc = \"The dependencies, any targets should be allowed\", ),", "title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml] + markdowns", "been licensed under Apache 2.0 license. Please see the LICENSE", "attr.label_list( allow_files = [\".t\"], doc = \"The file to compile\",", "= _copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml]", "rule(implementation = _asymptote_impl, attrs = { \"srcs\": attr.label_list( allow_files =", "not the # directory where the build happens! This is", "and its reference directory. 
# # Params: # script_path: (string)", "tar_command, ) dir_reference = epub_file script = ctx.executable._script name =", "markdowns=\" \".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps:", "PNG: {0}\".format(in_file.short_path), inputs = [in_file], outputs = [out_file], tools =", "directory where the build happens! This is needed because we", "= ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message = \"Building", "+= (provider.figures or []) markdowns += (provider.markdowns or []) runfiles", "target=htex_file.path, sources=\" \".join(markdowns_paths)) ) # run gladtex on the resulting", "dir_reference = markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for", "need to make this book work.\", providers = [EbookInfo], ),", "\"The markdown source files\", ), \"deps\": attr.label_list( doc = \"The", "= [out_file], tools = [docker_run], command = \"\"\"\\ {script} \\", "}, doc = \"Generate an ebook in the Kindle's MOBI", "= True, doc = \"The title.yaml file to use for", "epub-metadata.xml file to use for this book\", ), \"_script\": attr.label(", "True, doc = \"The title.yaml file to use for this", "+= provider.figures dir_reference = markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths =", "attrs = { \"srcs\": attr.label_list( allow_files = [\".asy\"], doc =", "_generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation = _dot_png_impl, attrs = {", "DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ] ebook_kindle = rule( implementation", "inputs = [outdir], outputs = [outdir_tar], command = tar_command, )", "outputs[0] equation_outdir = outputs[1] equation_outdir_tar = outputs[2] captured_output = ctx.actions.declare_file(", "outdir, outdir_tar] + markdowns + figures ctx.actions.run_shell( progress_message = \"Building", "= \"Copying 
{}\".format(src.short_path), outputs = [src_copy], inputs = [src], command=\"cp", "EPUB format\" ) def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\") def", "base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs", "rule( implementation = _ebook_pdf_impl, attrs = { \"deps\": attr.label_list( doc", "ctx.label.name # steps # run htex on all *md, gives", "= \"Building equation environments for: {}\".format(name), inputs = markdowns, outputs", "= [] figures = [] for dep in ctx.attr.deps: provider", "ctx.runfiles(files = figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ]", "= ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml] + markdowns + figures", "in ctx.attr.srcs: for src in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, src)]", "up paths -- relative to the directory dir_reference, not the", "equation_outdir_tar = outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the", "-o {ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path),", "\\ -f html -t epub3 -o {ebook_epub} {html_file} \\ \"\"\".format(", "= \"\"\"\\ {script} \\ {cmd} -Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format(", "script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file]) for", "graphviz dot file into png using dot\", ) def _asymptote_impl(ctx):", "# Maybe this is not needed. 
tar_command = \"(cd {base}", "_strip_reference_dir(reference_dir, file.path) for file in files] def _ebook_pdf_impl(ctx): name =", "path to a file used for figuring out # the", "cfg=\"host\"), }, doc = \"Generate an ebook in EPUB format\"", "based on the # script path and its reference directory.", "\"timing diagram to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs =", "html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps:", "{ \"deps\": attr.label_list( doc = \"All the targets you need", "{ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\"", "= rule( implementation = _ebook_epub_impl, attrs = { \"deps\": attr.label_list(", "DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png = rule(implementation = _drawtiming_png_impl, attrs =", "cfg=\"host\"), }, doc = \"Generate an ebook in the Kindle's", "def _drawtiming_png_impl(ctx): cmd = \"drawtiming\" docker_run = ctx.executable._script figures =", "This is the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use this", "= dep[EbookInfo] markdowns += provider.markdowns figures += provider.figures dir_reference =", "return src_copy def _copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message", "progress_message = \"Building EPUB for: {}\".format(name), inputs = inputs, tools", "runfiles = ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)", "dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns),", "resulting htex to obtain html and output directory with figures.", "-f html -t epub3 -o {ebook_epub} {html_file} \\ \"\"\".format( 
script=script_cmd,", "attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform an asymptote", "progress_message = \"Copying {} to {}\".format(src.short_path, src_copy.short_path), outputs = [src_copy],", "captured_output]), runfiles=runfiles, ) ] ebook_kindle = rule( implementation = _ebook_kindle_impl,", "[] for target in ctx.attr.deps: ebook_provider = target[EbookInfo] if not", "untar the equation dir # Maybe this is not needed.", "}, doc = \"Transform an asymptote file into png\", )", "= ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting equations for: {}\".format(name), inputs", "{script} \\ pandoc -s --gladtex -o {target} {sources} \\ \"\"\".format(", "tools = [script], outputs = [mobi_file], command = \"\"\"\\ {script}", "repository. # Build rules for building ebooks. # This is", "# run gladtex on the resulting htex to obtain html", "be only one such file outputs = default_info.files.to_list() epub_file =", "gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata)", "), }, ) def _ebook_epub_impl(ctx): name = ctx.label.name # This", "run gladtex on the resulting htex to obtain html and", "Returns the docker_run script invocation command based on the #", "progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs =", "invocation command based on the # script path and its", "_script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message = \"Building MOBI for: {}\".format(name), inputs", "] ebook_epub = rule( implementation = _ebook_epub_impl, attrs = {", "# This is duplicated in _ebook_pdf_impl. # steps # run", "duplicated in _ebook_pdf_impl. # steps # run htex on all", "First provider is EbookInfo, second is DefaultInfo. 
(ebook_info, default_info) =", "{} {}\".format(src.path, src_copy.path), ) return src_copy def _markdown_lib_impl(ctx): markdowns =", "= ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return", "for: {}\".format(name), inputs = [htex_file], outputs = [outdir, html_file], tools", "= ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path) # run htexepub to", "markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib = rule( implementation", "in EPUB format\" ) def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\")", "file into png\", ) def _copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name,", "is the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use this for", "epub_file = outputs[0] equation_outdir = outputs[1] equation_outdir_tar = outputs[2] captured_output", "= rule( implementation = _markdown_lib_impl, doc = \"Declares a set", "ctx.actions.run_shell( progress_message = \"Building equation environments for: {}\".format(name), inputs =", "\"dot\") dot_png = rule(implementation = _dot_png_impl, attrs = { \"srcs\":", "html_file, outdir, outdir_tar] + markdowns + figures ctx.actions.run_shell( progress_message =", "200 -d {outdir} {htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference,", "\"Declares a set of markdown files\", attrs = { \"srcs\":", "{}\".format(src.path, src_copy.path), ) return src_copy def _markdown_lib_impl(ctx): markdowns = []", "= _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0])", "def _markdown_lib_impl(ctx): markdowns = [] for target in ctx.attr.srcs: for", "[ebook_epub], command = \"\"\"\\ {script} 
--cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\", "# Returns the docker_run script invocation command based on the", "progress_message = \"ASY to PNG: {0}\".format(in_file.short_path), inputs = [in_file], outputs", "ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub]) for dep in", "{script} \\ {cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path,", "def _ebook_epub_impl(ctx): name = ctx.label.name # This is duplicated in", "to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs = [in_file], outputs", "format\" ) def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir,", "[script], command = \"\"\"\\ {script} \\ pandoc -s --gladtex -o", "or []) runfiles = ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps: runfiles", "doc = \"Transform a graphviz dot file into png using", "(ebook_info, default_info) = _ebook_epub_impl(ctx) # There can be only one", "path to the script to invoke # dir_reference: (string) The", "doc = \"The epub-metadata.xml file to use for this book\",", "= figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def", ") def _copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message", "outputs = [src_copy], inputs = [src], command=\"cp {} {}\".format(src.path, src_copy.path),", "_drawtiming_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".t\"], doc", "ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting equations for: {}\".format(name), inputs =", "ebook_provider: continue deps += ebook_provider.figures runfiles = 
ctx.runfiles(files=figures+deps) for dep", "= _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml,", "runfiles = ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)", "book\", ), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc =", "= \"Declares a set of markdown files\", attrs = {", "progress_message = \"Building MOBI for: {}\".format(name), inputs = [epub_file, equation_outdir],", "under Apache 2.0 license. Please see the LICENSE # file", "}, doc = \"Generate an ebook in EPUB format\" )", "dir_reference.path) # run htexepub to obtain book.epub. # This is", "outputs = [mobi_file], command = \"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert", "outdir_tar] + markdowns + figures ctx.actions.run_shell( progress_message = \"Building EPUB", "implementation = _markdown_lib_impl, doc = \"Declares a set of markdown", "script = ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path) # run htexepub", "file.path) for file in files] def _ebook_pdf_impl(ctx): name = ctx.label.name", "name = ctx.label.name # steps # run htex on all", "\"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) ) # run gladtex on", "[htex_file], outputs = [outdir, html_file], tools = [script], command =", "= \"Building MOBI for: {}\".format(name), inputs = [epub_file, equation_outdir], tools", "captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the equation dir #", "inclusion. markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd =", "outputs = [out_file], tools = [docker_run], command = \"\"\"\\ {script}", "# figure inclusion. 
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script", "markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png", "= _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message = \"ASY to PNG: {0}\".format(in_file.short_path),", "= [asycc], command = \"\"\"\\ {script} \\ asy -render 5", "_ebook_kindle_impl, attrs = { \"deps\": attr.label_list( doc = \"All the", "] drawtiming_png = rule(implementation = _drawtiming_png_impl, attrs = { \"srcs\":", "{1}: {0}\".format(in_file.short_path, cmd), inputs = [in_file], outputs = [out_file], tools", "EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote = rule(implementation = _asymptote_impl,", "steps # run htex on all *md, gives book.htex markdowns", "\\ pandoc --epub-metadata={epub_metadata} \\ -f html -t epub3 -o {ebook_epub}", "compile\", providers = [EbookInfo], ), }, ) def _ebook_epub_impl(ctx): name", "{archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving", "def _copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying", "return \"\"\"\\ {script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference,", "[\".dot\"], doc = \"The file to compile\", ), \"deps\": attr.label_list(", "\"\"\"\\ {script} \\ pandoc -s --gladtex -o {target} {sources} \\", "LICENSE # file at the root of the repository. 
#", "inputs = [epub_file, equation_outdir], tools = [script], outputs = [mobi_file],", "sources=\" \".join(markdowns_paths)) ) # run gladtex on the resulting htex", "= ctx.executable._script figures = [] for target in ctx.attr.srcs: for", "we can not control # figure inclusion. markdowns_paths = _strip_reference_dir_from_files(dir_reference,", "# steps # run htex on all *md, gives book.htex", "all *md, gives book.htex markdowns = [] figures = []", "\"The title.yaml file to use for this book\", ), \"metadata_xml\":", "command = \"\"\"\\ {script} --cd-to-dir-reference \\ gladtex -r 200 -d", "inputs = [equation_outdir_tar], outputs = [captured_output], command = tar_command, )", "[out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"graphviz to", "markdowns, outputs = [htex_file], tools = [script], command = \"\"\"\\", "= ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for file in markdowns] markdowns_paths_stripped", "[src], command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy def _markdown_lib_impl(ctx):", "cfg=\"host\"), }, doc = \"Transform a graphviz dot file into", "tools = [asycc], command = \"\"\"\\ {script} \\ asy -render", "mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps:", "\"\"\"\\ {script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference, )", "diagram file into png using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd):", "), \"output\": attr.output(doc=\"The generated file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"),", "the build happens! 
This is needed because we can not", "= _ebook_pdf_impl, attrs = { \"deps\": attr.label_list( doc = \"All", "def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files): return", "to compile\", providers = [EbookInfo], ), }, ) def _ebook_epub_impl(ctx):", "is gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx,", "invoke # dir_reference: (string) The path to a file used", "ctx.actions.run_shell( progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs", "in_file=in_file.path, script=script_cmd), ) deps = [] for target in ctx.attr.deps:", "\"The file to compile\", ), \"deps\": attr.label_list( doc = \"The", "figures = [] for target in ctx.attr.srcs: for src in", "default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform a graphviz dot", "doc = \"Declares a set of markdown files\", attrs =", "inputs = [src], command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy", "Use this for quick local runs. #CONTAINER = \"ebook-buildenv:local\" EbookInfo", "# run htex on all *md, gives book.htex markdowns =", "[ _strip_reference_dir(reference_dir, file.path) for file in files] def _ebook_pdf_impl(ctx): name", "= [htex_file], tools = [script], command = \"\"\"\\ {script} \\", "(C) 2020 Google Inc. 
# # This file has been", "(string) The full path to the script to invoke #", "= ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml", "--cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path),", "[docker_run], command = \"\"\"\\ {script} \\ {cmd} --output \"{out_file}\" \"{in_file}\"", "tar_command, ) # run htexepub to obtain book.epub. # This", "markdown_lib = rule( implementation = _markdown_lib_impl, doc = \"Declares a", "mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps: runfiles", "ctx.actions.run_shell( progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path), inputs = [outdir], outputs", "tools = [script], outputs = [ebook_pdf], command = \"\"\"\\ {script}", "# # This file has been licensed under Apache 2.0", ") return src_copy def _copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell(", "[ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf = rule( implementation", "= [htex_file], outputs = [outdir, html_file], tools = [script], command", "file to use for this book\", ), \"_script\": attr.label( default=\"//build:docker_run\",", "path and its reference directory. # # Params: # script_path:", "PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs = [in_file], outputs =", "command = \"\"\"\\ {script} \\ pandoc -s --gladtex -o {target}", "# directory where the build happens! 
This is needed because", "\\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), ))", "[EbookInfo], ), }, ) def _ebook_epub_impl(ctx): name = ctx.label.name #", "ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml] + markdowns + figures ctx.actions.run_shell(", "# Params: # script_path: (string) The full path to the", "{0}\".format(in_file.short_path), inputs = [in_file], outputs = [out_file], tools = [asycc],", "make this book work.\", providers = [EbookInfo], ), \"title_yaml\": attr.label(", "outputs = default_info.files.to_list() epub_file = outputs[0] equation_outdir = outputs[1] equation_outdir_tar", "= default_info.files.to_list() epub_file = outputs[0] equation_outdir = outputs[1] equation_outdir_tar =", "targets you need to make this book work.\", providers =", "fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml =", "out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = [] for target in", "pandoc -s --gladtex -o {target} {sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path,", "= [\".dot\"], doc = \"The file to compile\", ), \"deps\":", "gives book.htex markdowns = [] figures = [] for dep", "[ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ] ebook_kindle = rule(", "= [outdir], outputs = [outdir_tar], command = tar_command, ) #", "tar_command = \"(cd {base} ; tar cf {archive} {dir})\".format( base=outdir_tar.dirname,", "\\ gladtex -r 200 -d {outdir} {htex_file} \\ \"\"\".format( script=script_cmd,", "[script], command = \"\"\"\\ {script} --cd-to-dir-reference \\ gladtex -r 200", "\"srcs\": attr.label_list( allow_files = [\".t\"], doc = \"The file to", "{}\".format(name), inputs = [epub_file, equation_outdir], tools = [script], 
outputs =", "\"Transform an asymptote file into png\", ) def _copy_file_to_workdir_renamed(ctx, src):", "\"Building PDF for: {}\".format(name), inputs = inputs, tools = [script],", "epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf])", "] ebook_kindle = rule( implementation = _ebook_kindle_impl, attrs = {", "_generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script figures = [] for target", "EbookInfo = provider(fields=[\"figures\", \"markdowns\"]) # Returns the docker_run script invocation", "[file.path for file in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script", ") def _drawtiming_png_impl(ctx): cmd = \"drawtiming\" docker_run = ctx.executable._script figures", "{}\".format(name), inputs = inputs, tools = [script], outputs = [ebook_epub],", "ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [", "for src in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, src)] figures =", "\"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\", \"markdowns\"]) # Returns the docker_run script", "[script], outputs = [ebook_pdf], command = \"\"\"\\ {script} --cd-to-dir-reference \\", "[out_file], tools = [docker_run], command = \"\"\"\\ {script} \\ {cmd}", "markdowns + figures ctx.actions.run_shell( progress_message = \"Building PDF for: {}\".format(name),", "dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path), inputs = [outdir],", "at the root of the repository. # Build rules for", "_neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation = _neato_png_impl, attrs", "# run htexepub to obtain book.epub. 
# This is gonna", "is duplicated in _ebook_pdf_impl. # steps # run htex on", "ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir,", "for target in ctx.attr.srcs: for src in target.files.to_list(): markdowns +=", "_script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message = \"ASY to PNG: {0}\".format(in_file.short_path), inputs", "doc = \"Transform an asymptote file into png\", ) def", "into png\", ) def _copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path))", "[equation_outdir_tar], outputs = [captured_output], command = tar_command, ) dir_reference =", "-render 5 -f png -o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path,", "an ebook in PDF format\" ) def _ebook_kindle_impl(ctx): mobi_file =", "runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ] ebook_kindle", "to obtain book.epub. # This is gonna be fun! epub_metadata", "), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Generate", "[src_copy], inputs = [src], command=\"cp {} {}\".format(src.path, src_copy.path), ) return", "cmd), inputs = [in_file], outputs = [out_file], tools = [docker_run],", "this for quick local runs. 
#CONTAINER = \"ebook-buildenv:local\" EbookInfo =", "to use for this book\", ), \"_script\": attr.label( default=\"//build:docker_run\", executable=True,", "_copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message = \"Copying", "equations for: {}\".format(name), inputs = [htex_file], outputs = [outdir, html_file],", "\"The dependencies, any targets should be allowed\", ), \"output\": attr.output(doc=\"The", "inputs = markdowns, outputs = [htex_file], tools = [script], command", "outputs = [outdir, html_file], tools = [script], command = \"\"\"\\", "Inc. # # This file has been licensed under Apache", "\\ {cmd} -Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path,", "{script} \\ asy -render 5 -f png -o \"{out_file}\" \"{in_file}\"", "for quick local runs. #CONTAINER = \"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\",", "the resulting htex to obtain html and output directory with", "src_copy.path), ) return src_copy def _markdown_lib_impl(ctx): markdowns = [] for", "[script], outputs = [ebook_epub], command = \"\"\"\\ {script} --cd-to-dir-reference \\", "= [script], outputs = [mobi_file], command = \"\"\"\\ {script} --cd-to-dir-reference", "needed. 
tar_command = \"(cd {base} ; tar xvf {archive}) >", "[] for target in ctx.attr.srcs: for src in target.files.to_list(): in_file", "return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, ) ]", "dir_reference: (string) The path to a file used for figuring", "markdowns + figures ctx.actions.run_shell( progress_message = \"Building EPUB for: {}\".format(name),", "= ctx.executable._script name = ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path) ctx.actions.run_shell(", "into png using neato\", ) def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\")", "+= [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"timing", "directories (build root and repo root). def _script_cmd(script_path, dir_reference): return", "\"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf}", "dot file into png using neato\", ) def _dot_png_impl(ctx): return", "ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf]) for dep in", "[epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns + figures ctx.actions.run_shell(", "script=script_cmd), ) deps = [] for target in ctx.attr.deps: ebook_provider", "= [\".t\"], doc = \"The file to compile\", ), \"deps\":", "implementation = _ebook_epub_impl, attrs = { \"deps\": attr.label_list( doc =", "= inputs, tools = [script], outputs = [ebook_pdf], command =", "continue deps += ebook_provider.figures runfiles = ctx.runfiles(files = figures) return", "= ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return", "files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ] ebook_kindle = rule( implementation =", "html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps: runfiles", 
"ctx.actions.run_shell( progress_message = \"Building MOBI for: {}\".format(name), inputs = [epub_file,", "= \"timing diagram to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs", "epub3 -o {ebook_epub} {html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference,", "ebook_provider.figures runfiles = ctx.runfiles(files = figures) return [ EbookInfo(figures=figures+deps, markdowns=[]),", "if not ebook_provider: continue deps += ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps)", "> {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving equations:", "out # the reference directories (build root and repo root).", "a file used for figuring out # the reference directories", "in PDF format\" ) def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) #", "= [epub_file, equation_outdir], tools = [script], outputs = [mobi_file], command", "_script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"timing diagram to PNG with", "= ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting equations", "\"markdowns\"]) # Returns the docker_run script invocation command based on", "src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message = \"Copying {}", "rules for building ebooks. 
# This is the container CONTAINER", "= [] for target in ctx.attr.deps: provider = target[EbookInfo] figures", "file outputs = default_info.files.to_list() epub_file = outputs[0] equation_outdir = outputs[1]", "= \"Extracting equations for: {}\".format(name), inputs = [htex_file], outputs =", "[in_file], outputs = [out_file], tools = [asycc], command = \"\"\"\\", "markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building equation environments for: {}\".format(name), inputs", "doc = \"The title.yaml file to use for this book\",", "{outdir} {htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), )", "file to compile\", providers = [EbookInfo], ), }, ) def", "\\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles =", "\\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): cmd =", "epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf]) for", "a timing diagram file into png using drawtiming\", ) def", "happens! This is needed because we can not control #", "script_cmd = _script_cmd(script.path, dir_reference.path) # run htexepub to obtain book.epub.", "the equation dir # Maybe this is not needed. tar_command", "\"Archiving equations: {}\".format(outdir_tar.short_path), inputs = [outdir], outputs = [outdir_tar], command", "{script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ -f html -t epub3", "of the repository. # Build rules for building ebooks. #", "[] for target in ctx.attr.deps: provider = target[EbookInfo] figures +=", "is EbookInfo, second is DefaultInfo. 
(ebook_info, default_info) = _ebook_epub_impl(ctx) #", "{ \"srcs\": attr.label_list( allow_files = [\".md\"], doc = \"The markdown", "because we can not control # figure inclusion. markdowns_paths =", "figures += [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message =", "= [docker_run], command = \"\"\"\\ {script} \\ {cmd} --output \"{out_file}\"", "allow_files = [\".dot\"], doc = \"The file to compile\", ),", "= _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"graphviz to PNG with", "src.short_path)) ctx.actions.run_shell( progress_message = \"Copying {} to {}\".format(src.short_path, src_copy.short_path), outputs", "name = ctx.label.name # This is duplicated in _ebook_pdf_impl. #", "\"Generate an ebook in EPUB format\" ) def _strip_reference_dir(reference_dir, path):", "ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message = \"Building MOBI", "# This file has been licensed under Apache 2.0 license.", "{ \"srcs\": attr.label_list( allow_files = [\".t\"], doc = \"The file", "= runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote", "\"\"\"\\ {script} --cd-to-dir-reference \\ gladtex -r 200 -d {outdir} {htex_file}", "file has been licensed under Apache 2.0 license. Please see", "inputs, tools = [script], outputs = [ebook_epub], command = \"\"\"\\", "= _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building equation environments for:", "outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command =", "the reference directories (build root and repo root). 
def _script_cmd(script_path,", "for target in ctx.attr.deps: ebook_provider = target[EbookInfo] if not ebook_provider:", "\"The epub-metadata.xml file to use for this book\", ), \"_script\":", "for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures,", "tools = [docker_run], command = \"\"\"\\ {script} \\ {cmd} --output", "_script_cmd(script_path, dir_reference): return \"\"\"\\ {script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path,", "command = \"\"\"\\ {script} \\ asy -render 5 -f png", "dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, ) ] ebook_epub =", "= \"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file} \\ \"\"\".format(", "_asymptote_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".asy\"], doc", "_strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir, file.path) for file in files]", "script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message = \"ASY to PNG:", "= ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is EbookInfo, second is DefaultInfo.", "runfiles = ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)", "in files] def _ebook_pdf_impl(ctx): name = ctx.label.name # steps #", "= \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ --mathml -o", "def _generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script figures = [] for", "\"deps\": attr.label_list( doc = \"All the targets you need to", "ebooks. 
# This is the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" #", "ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the equation dir # Maybe this", "the docker_run script invocation command based on the # script", "files] def _ebook_pdf_impl(ctx): name = ctx.label.name # steps # run", "\"All the targets you need to make this book work.\",", "= epub_file script = ctx.executable._script name = ctx.label.name script_cmd =", "for building ebooks. # This is the container CONTAINER =", "mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is EbookInfo, second is", "dir # Maybe this is not needed. tar_command = \"(cd", "def _script_cmd(script_path, dir_reference): return \"\"\"\\ {script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format(", "should be allowed\", ), \"output\": attr.output(doc=\"The generated file\"), \"_script\": attr.label(", "htex_file.path), ) ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base}", "\"Generate an ebook in PDF format\" ) def _ebook_kindle_impl(ctx): mobi_file", "for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps,", "{}\".format(src.path, src_copy.path), ) return src_copy def _copy_file_to_workdir(ctx, src): src_copy =", "_asymptote_impl(ctx): asycc = ctx.executable._script figures = [] for target in", "script path and its reference directory. 
# # Params: #", "to compile\", ), \"deps\": attr.label_list( doc = \"The dependencies, any", "= provider(fields=[\"figures\", \"markdowns\"]) # Returns the docker_run script invocation command", "provider.figures dir_reference = markdowns[0] # Fixed up paths -- relative", "\"ASY to PNG: {0}\".format(in_file.short_path), inputs = [in_file], outputs = [out_file],", "archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path), inputs =", "[_copy_file_to_workdir(ctx, src)] figures = [] for target in ctx.attr.deps: provider", "= \"drawtiming\" docker_run = ctx.executable._script figures = [] for target", "to obtain html and output directory with figures. outdir =", "_copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path),", "= [docker_run], command = \"\"\"\\ {script} \\ {cmd} -Tpng -o", "an ebook in EPUB format\" ) def _strip_reference_dir(reference_dir, path): return", "this book\", ), \"metadata_xml\": attr.label( allow_files = True, doc =", "ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps: runfiles =", "htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for file in markdowns]", "dot file into png using dot\", ) def _asymptote_impl(ctx): asycc", "[ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx,", "return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib", "files\", attrs = { \"srcs\": attr.label_list( allow_files = [\".md\"], doc", "] asymptote = rule(implementation = _asymptote_impl, attrs = { \"srcs\":", "script_cmd = _script_cmd(script.path, epub_file.path) 
ctx.actions.run_shell( progress_message = \"Building MOBI for:", "deps = [] for target in ctx.attr.deps: ebook_provider = target[EbookInfo]", "html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting equations for: {}\".format(name),", "+= ebook_provider.figures runfiles = ctx.runfiles(files = figures) return [ EbookInfo(figures=figures+deps,", "for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo(", "[mobi_file], command = \"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file}", "\"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = []", "outputs = [outdir_tar], command = tar_command, ) # run htexepub", "runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles,", "ctx.actions.run_shell( progress_message = \"Copying {} to {}\".format(src.short_path, src_copy.short_path), outputs =", "This file has been licensed under Apache 2.0 license. 
Please", "= _script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message = \"Building MOBI for: {}\".format(name),", "epub_file script = ctx.executable._script name = ctx.label.name script_cmd = _script_cmd(script.path,", "_strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path) #", "executable=True, cfg=\"host\"), }, doc = \"Transform a timing diagram file", "script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building equation environments", "= ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base} ; tar cf {archive}", "allow_files = True, doc = \"The title.yaml file to use", "doc = \"Generate an ebook in EPUB format\" ) def", "runfiles=runfiles, ) ] ebook_pdf = rule( implementation = _ebook_pdf_impl, attrs", "file used for figuring out # the reference directories (build", "doc = \"Transform a timing diagram file into png using", "return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png = rule(implementation", "--gladtex -o {target} {sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths))", "= \"The file to compile\", providers = [EbookInfo], ), },", "= [in_file], outputs = [out_file], tools = [asycc], command =", "for file in files] def _ebook_pdf_impl(ctx): name = ctx.label.name #", "into png using dot\", ) def _asymptote_impl(ctx): asycc = ctx.executable._script", "rule( implementation = _ebook_kindle_impl, attrs = { \"deps\": attr.label_list( doc", "\"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = [] for", "in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file, captured_output]),", "\"_script\": attr.label( 
default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Generate an", "_strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell(", "= rule(implementation = _dot_png_impl, attrs = { \"srcs\": attr.label_list( allow_files", "epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml, html_file, outdir,", "docker_run script invocation command based on the # script path", "= tar_command, ) # run htexepub to obtain book.epub. #", "with figures. outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message", "dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]),", "_dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation = _dot_png_impl, attrs", "file to compile\", ), \"deps\": attr.label_list( doc = \"The dependencies,", "files\", ), \"deps\": attr.label_list( doc = \"The file to compile\",", "asymptote file into png\", ) def _copy_file_to_workdir_renamed(ctx, src): src_copy =", "runfiles=runfiles, ), ] markdown_lib = rule( implementation = _markdown_lib_impl, doc", "[captured_output], command = tar_command, ) dir_reference = epub_file script =", "rule( implementation = _markdown_lib_impl, doc = \"Declares a set of", "for figuring out # the reference directories (build root and", "-Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), )", "license. 
Please see the LICENSE # file at the root", "ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles),", "cfg=\"host\"), }, doc = \"Transform an asymptote file into png\",", "# Use this for quick local runs. #CONTAINER = \"ebook-buildenv:local\"", "{} to {}\".format(src.short_path, src_copy.short_path), outputs = [src_copy], inputs = [src],", "ctx.label.name # This is duplicated in _ebook_pdf_impl. # steps #", ")) runfiles = ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps: runfiles =", "neato_png = rule(implementation = _neato_png_impl, attrs = { \"srcs\": attr.label_list(", "of markdown files\", attrs = { \"srcs\": attr.label_list( allow_files =", "\"Transform a graphviz dot file into png using neato\", )", "= _ebook_kindle_impl, attrs = { \"deps\": attr.label_list( doc = \"All", "def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation = _neato_png_impl,", "), \"title_yaml\": attr.label( allow_files = True, doc = \"The title.yaml", "drawtiming_png = rule(implementation = _drawtiming_png_impl, attrs = { \"srcs\": attr.label_list(", "= outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the equation", "= \"(cd {base} ; tar xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname,", "to a file used for figuring out # the reference", "[] figures = [] for dep in ctx.attr.deps: provider =", "attrs = { \"srcs\": attr.label_list( allow_files = [\".md\"], doc =", "# script path and its reference directory. 
# # Params:", "def _ebook_pdf_impl(ctx): name = ctx.label.name # steps # run htex", "= _copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml)", "name = ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path) ctx.actions.run_shell( progress_message =", "{html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path),", "), \"metadata_xml\": attr.label( allow_files = True, doc = \"The epub-metadata.xml", "ctx.attr.deps: provider = dep[EbookInfo] markdowns += provider.markdowns figures += provider.figures", "outputs = [out_file], tools = [asycc], command = \"\"\"\\ {script}", "\"The file to compile\", providers = [EbookInfo], ), }, )", "htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd", "= \"Building EPUB for: {}\".format(name), inputs = inputs, tools =", "DefaultInfo. (ebook_info, default_info) = _ebook_epub_impl(ctx) # There can be only", "runfiles=runfiles, ) ] ebook_epub = rule( implementation = _ebook_epub_impl, attrs", "reference directories (build root and repo root). 
def _script_cmd(script_path, dir_reference):", "doc = \"Generate an ebook in the Kindle's MOBI format\"", "file in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script", "dep[EbookInfo] markdowns += provider.markdowns figures += provider.figures dir_reference = markdowns[0]", "tools = [script], command = \"\"\"\\ {script} \\ pandoc -s", "ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [", "= _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path)", "default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform a timing diagram", "markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote = rule(implementation = _asymptote_impl, attrs", "def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation = _dot_png_impl,", "outdir, outdir_tar]), runfiles=runfiles, ) ] ebook_epub = rule( implementation =", "environments for: {}\".format(name), inputs = markdowns, outputs = [htex_file], tools", "path): return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir,", "_copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml, html_file,", "# # Params: # script_path: (string) The full path to", "= [EbookInfo], ), \"title_yaml\": attr.label( allow_files = True, doc =", "\"(cd {base} ; tar xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename,", "= _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, 
epub_metadata)", "= \"The file to compile\", ), \"deps\": attr.label_list( doc =", "file into png using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd): docker_run", "runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ]", "be allowed\", ), \"output\": attr.output(doc=\"The generated file\"), \"_script\": attr.label( default=\"//build:docker_run\",", "This is duplicated in _ebook_pdf_impl. # steps # run htex", "runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, )", "quick local runs. #CONTAINER = \"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\", \"markdowns\"])", "generated file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc =", "\"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = [] for", "epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx,", "{archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving equations:", "\\ pandoc --epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf} {markdowns} \\ \"\"\".format(", ")) runfiles = ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps: runfiles =", "ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [", "root of the repository. 
# Build rules for building ebooks.", "allow_files = True, doc = \"The epub-metadata.xml file to use", "\"metadata_xml\": attr.label( allow_files = True, doc = \"The epub-metadata.xml file", ") ] ebook_kindle = rule( implementation = _ebook_kindle_impl, attrs =", "# Fixed up paths -- relative to the directory dir_reference,", "book.epub. # This is gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]", "allowed\", ), \"output\": attr.output(doc=\"The generated file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True,", "Copyright (C) 2020 Google Inc. # # This file has", "default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Generate an ebook in", "outputs = [captured_output], command = tar_command, ) dir_reference = epub_file", "ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml =", "\"deps\": attr.label_list( doc = \"The dependencies, any targets should be", "= _asymptote_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".asy\"],", ") # run htexepub to obtain book.epub. # This is", "[script], outputs = [mobi_file], command = \"\"\"\\ {script} --cd-to-dir-reference \\", "# This is the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use", "tar_command = \"(cd {base} ; tar xvf {archive}) > {output}\".format(", "container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use this for quick local", "file into png using dot\", ) def _asymptote_impl(ctx): asycc =", "ebook_epub = rule( implementation = _ebook_epub_impl, attrs = { \"deps\":", "\"Copying {}\".format(src.short_path), outputs = [src_copy], inputs = [src], command=\"cp {}", "markdowns += provider.markdowns figures += provider.figures dir_reference = markdowns[0] #", "file at the root of the repository. 
# Build rules", "in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles,", "\\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar", "ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path)", "\"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps =", "only one such file outputs = default_info.files.to_list() epub_file = outputs[0]", "second is DefaultInfo. (ebook_info, default_info) = _ebook_epub_impl(ctx) # There can", "\"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles", "script = ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message =", "relative to the directory dir_reference, not the # directory where", "= ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message = \"Copying {} to {}\".format(src.short_path,", "DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote = rule(implementation = _asymptote_impl, attrs =", "a graphviz dot file into png using neato\", ) def", "= ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd = _script_cmd(asycc.path,", "figuring out # the reference directories (build root and repo", "= \"Transform a timing diagram file into png using drawtiming\",", "}, ) def _ebook_epub_impl(ctx): name = ctx.label.name # This is", "+ \".png\") figures += [out_file] script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell(", "html_file], tools = 
[script], command = \"\"\"\\ {script} --cd-to-dir-reference \\", "epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0]", "figures += provider.figures dir_reference = markdowns[0] # Fixed up paths", "for file in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script =", "\"{}.untar-out\".format(ctx.label.name)) # untar the equation dir # Maybe this is", "= _ebook_epub_impl(ctx) # There can be only one such file", "_generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation = _neato_png_impl, attrs = {", "] markdown_lib = rule( implementation = _markdown_lib_impl, doc = \"Declares", "attr.label_list( allow_files = [\".asy\"], doc = \"The file to compile\",", "markdowns = [] figures = [] for dep in ctx.attr.deps:", "xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message =", "ctx.executable._script figures = [] for target in ctx.attr.srcs: for src", "ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns", "asy -render 5 -f png -o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path,", "= ctx.runfiles(files=[mobi_file]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return", "\\ ebook-convert {epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference,", "src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path), outputs =", "has been licensed under Apache 2.0 license. 
Please see the", ") outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base} ; tar", "timing diagram file into png using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx,", "executable=True, cfg=\"host\"), }, doc = \"Transform an asymptote file into", "[epub_file, equation_outdir], tools = [script], outputs = [mobi_file], command =", "figures = [] for target in ctx.attr.deps: provider = target[EbookInfo]", "-f png -o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), )", ") ] ebook_epub = rule( implementation = _ebook_epub_impl, attrs =", "title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name))", "run htexepub to obtain book.epub. # This is gonna be", "progress_message = \"timing diagram to PNG with {1}: {0}\".format(in_file.short_path, cmd),", "this is not needed. tar_command = \"(cd {base} ; tar", "= outputs[0] equation_outdir = outputs[1] equation_outdir_tar = outputs[2] captured_output =", "= [script], outputs = [ebook_epub], command = \"\"\"\\ {script} --cd-to-dir-reference", "this book work.\", providers = [EbookInfo], ), \"title_yaml\": attr.label( allow_files", "in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps),", "markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path,", "title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs = [epub_metadata,", "{}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs = [captured_output], command = tar_command,", "= \"ASY to PNG: {0}\".format(in_file.short_path), inputs = [in_file], 
outputs =", "attr.label_list( doc = \"The dependencies, any targets should be allowed\",", "\\ --mathml -o {ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),", "src_copy def _markdown_lib_impl(ctx): markdowns = [] for target in ctx.attr.srcs:", "attr.label_list( doc = \"The file to compile\", providers = [EbookInfo],", "_ebook_pdf_impl(ctx): name = ctx.label.name # steps # run htex on", "ebook_kindle = rule( implementation = _ebook_kindle_impl, attrs = { \"deps\":", "outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the equation dir", "= \"Generate an ebook in the Kindle's MOBI format\" )", "\\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), ))", "= figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png", "outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message = \"Extracting", "= ctx.runfiles(files = figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles),", "figures += provider.figures dir_reference = markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths", "deps += ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps) for dep in ctx.attr.deps:", "= [script], outputs = [ebook_pdf], command = \"\"\"\\ {script} --cd-to-dir-reference", "on all *md, gives book.htex markdowns = [] figures =", "The full path to the script to invoke # dir_reference:", "+ markdowns + figures ctx.actions.run_shell( progress_message = \"Building EPUB for:", 
"markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for file in", "= [script], command = \"\"\"\\ {script} --cd-to-dir-reference \\ gladtex -r", "script to invoke # dir_reference: (string) The path to a", "attr.output(doc=\"The generated file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc", "command based on the # script path and its reference", "for dep in ctx.attr.deps: provider = dep[EbookInfo] markdowns += provider.markdowns", "inputs = [in_file], outputs = [out_file], tools = [asycc], command", "work.\", providers = [EbookInfo], ), \"title_yaml\": attr.label( allow_files = True,", "= ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path), outputs = [src_copy],", "= ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs", "paths -- relative to the directory dir_reference, not the #", "the repository. # Build rules for building ebooks. 
# This", "= \"\"\"\\ {script} --cd-to-dir-reference \\ gladtex -r 200 -d {outdir}", "ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles,", "{} {}\".format(src.path, src_copy.path), ) return src_copy def _copy_file_to_workdir(ctx, src): src_copy", "+ figures ctx.actions.run_shell( progress_message = \"Building EPUB for: {}\".format(name), inputs", "neato\", ) def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation", "ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [", ") # run gladtex on the resulting htex to obtain", "{base} ; tar cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell(", "directory. # # Params: # script_path: (string) The full path", "\"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles = ctx.runfiles(files=[mobi_file])", "_dot_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".dot\"], doc", "implementation = _ebook_pdf_impl, attrs = { \"deps\": attr.label_list( doc =", "[outdir, html_file], tools = [script], command = \"\"\"\\ {script} --cd-to-dir-reference", "provider = dep[EbookInfo] markdowns += provider.markdowns figures += provider.figures dir_reference", "\"srcs\": attr.label_list( allow_files = [\".dot\"], doc = \"The file to", "title_yaml, html_file, outdir, outdir_tar] + markdowns + figures ctx.actions.run_shell( progress_message", "{cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), )", "(provider.figures or []) markdowns += (provider.markdowns or []) runfiles =", "target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, 
src)] figures = [] for target", "markdowns_paths = [file.path for file in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference,", "= [in_file], outputs = [out_file], tools = [docker_run], command =", "outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base} ; tar cf", "\"Extracting equations for: {}\".format(name), inputs = [htex_file], outputs = [outdir,", "{}\".format(name), inputs = inputs, tools = [script], outputs = [ebook_pdf],", "files): return [ _strip_reference_dir(reference_dir, file.path) for file in files] def", "use for this book\", ), \"metadata_xml\": attr.label( allow_files = True,", "{markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths),", "= runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles,", "), \"deps\": attr.label_list( doc = \"The dependencies, any targets should", "directory with figures. 
outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell(", "cmd): docker_run = ctx.executable._script figures = [] for target in", "ebook_provider: continue deps += ebook_provider.figures runfiles = ctx.runfiles(files = figures)", "using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script figures", "+= [_copy_file_to_workdir(ctx, src)] figures = [] for target in ctx.attr.deps:", "tar xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message", "implementation = _ebook_kindle_impl, attrs = { \"deps\": attr.label_list( doc =", "-d {outdir} {htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path),", "= outputs[1] equation_outdir_tar = outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) #", "return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation = _dot_png_impl, attrs =", "reference directory. 
# # Params: # script_path: (string) The full", "= { \"srcs\": attr.label_list( allow_files = [\".asy\"], doc = \"The", "command = \"\"\"\\ {script} \\ {cmd} -Tpng -o \"{out_file}\" \"{in_file}\"", "_copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub", "{output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path),", "script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"graphviz to PNG", "attr.label_list( allow_files = [\".dot\"], doc = \"The file to compile\",", "+= provider.figures dir_reference = markdowns[0] # Fixed up paths --", "}, doc = \"Transform a graphviz dot file into png", "the # script path and its reference directory. # #", "\"drawtiming\" docker_run = ctx.executable._script figures = [] for target in", "This is gonna be fun! 
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata =", "[out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"timing diagram", "script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles =", "ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml =", "(string) The path to a file used for figuring out", "-r 200 -d {outdir} {htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path),", "ctx.actions.run_shell( progress_message = \"Building PDF for: {}\".format(name), inputs = inputs,", "markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd =", "src in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, src)] figures = []", "markdowns += provider.markdowns figures += provider.figures dir_reference = markdowns[0] htex_file", "figures += (provider.figures or []) markdowns += (provider.markdowns or [])", "dependencies, any targets should be allowed\", ), \"output\": attr.output(doc=\"The generated", "[EbookInfo], ), \"title_yaml\": attr.label( allow_files = True, doc = \"The", "\"output\": attr.output(doc=\"The generated file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), },", "progress_message = \"Copying {}\".format(src.short_path), outputs = [src_copy], inputs = [src],", "markdowns += (provider.markdowns or []) runfiles = ctx.runfiles(files=figures+markdowns) for dep", "\"\"\"\\ {script} \\ {cmd} -Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd,", "\\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, 
container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx):", "PDF for: {}\".format(name), inputs = inputs, tools = [script], outputs", "{cmd} -Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd),", "runfiles = ctx.runfiles(files = figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps),", "= { \"deps\": attr.label_list( doc = \"All the targets you", "attrs = { \"srcs\": attr.label_list( allow_files = [\".t\"], doc =", "provider.markdowns figures += provider.figures dir_reference = markdowns[0] # Fixed up", "[ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib =", "can be only one such file outputs = default_info.files.to_list() epub_file", "= rule(implementation = _asymptote_impl, attrs = { \"srcs\": attr.label_list( allow_files", "= True, doc = \"The epub-metadata.xml file to use for", "target in ctx.attr.deps: ebook_provider = target[EbookInfo] if not ebook_provider: continue", "}, doc = \"Transform a timing diagram file into png", "= \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs = [captured_output],", "= [outdir, html_file], tools = [script], command = \"\"\"\\ {script}", "provider.markdowns figures += provider.figures dir_reference = markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name))", "repo root). def _script_cmd(script_path, dir_reference): return \"\"\"\\ {script} \\ --container={container}", "building ebooks. # This is the container CONTAINER = \"filipfilmar/ebook-buildenv:1.1\"", "the root of the repository. 
# Build rules for building", "\"srcs\": attr.label_list( allow_files = [\".asy\"], doc = \"The file to", "# dir_reference: (string) The path to a file used for", "progress_message = \"Building equation environments for: {}\".format(name), inputs = markdowns,", "\"\") def _strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir, file.path) for file", "\"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform a", "cfg=\"host\"), }, doc = \"Transform a timing diagram file into", "outputs = [htex_file], tools = [script], command = \"\"\"\\ {script}", "= [ebook_epub], command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata}", "ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path), outputs = [src_copy], inputs =", ")) runfiles = ctx.runfiles(files=[ebook_epub]) for dep in ctx.attr.deps: runfiles =", "_script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building equation environments for: {}\".format(name),", "dir_reference, not the # directory where the build happens! 
This", "[] for dep in ctx.attr.deps: provider = dep[EbookInfo] markdowns +=", ") def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png = rule(implementation =", "+= provider.markdowns figures += provider.figures dir_reference = markdowns[0] # Fixed", "\"(cd {base} ; tar cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename)", "ctx.actions.run_shell( progress_message = \"timing diagram to PNG with {1}: {0}\".format(in_file.short_path,", "\"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ -f html -t", "= [mobi_file], command = \"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert {epub_file}", "runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ]", "\\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) ) # run gladtex", "[ebook_pdf], command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\", "target in ctx.attr.srcs: for src in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx,", "--dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): cmd = \"drawtiming\"", "outputs = [ebook_pdf], command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc", "return [ _strip_reference_dir(reference_dir, file.path) for file in files] def _ebook_pdf_impl(ctx):", "[\".t\"], doc = \"The file to compile\", ), \"deps\": attr.label_list(", "inputs = [in_file], outputs = [out_file], tools = [docker_run], command", "\\ asy -render 5 -f png -o \"{out_file}\" \"{in_file}\" \"\"\".format(", "src in target.files.to_list(): in_file = src out_file = ctx.actions.declare_file(in_file.basename +", "container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): 
cmd = \"drawtiming\" docker_run =", "{0}\".format(in_file.short_path, cmd), inputs = [in_file], outputs = [out_file], tools =", "outputs[1] equation_outdir_tar = outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar", "progress_message = \"graphviz to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs", "script = ctx.executable._script name = ctx.label.name script_cmd = _script_cmd(script.path, epub_file.path)", "attr.label_list( allow_files = [\".md\"], doc = \"The markdown source files\",", "inputs = [epub_metadata, title_yaml] + markdowns + figures ctx.actions.run_shell( progress_message", "file\"), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform", "= ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message = \"Building", "= { \"srcs\": attr.label_list( allow_files = [\".t\"], doc = \"The", "a set of markdown files\", attrs = { \"srcs\": attr.label_list(", "ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path for file in markdowns] markdowns_paths_stripped =", "in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo(", "html -t epub3 -o {ebook_epub} {html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference,", "control # figure inclusion. 
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script =", "[ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png = rule(implementation =", "return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir, file.path)", "script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"timing diagram to", "{}\".format(outdir_tar.short_path), inputs = [outdir], outputs = [outdir_tar], command = tar_command,", "rule( implementation = _ebook_epub_impl, attrs = { \"deps\": attr.label_list( doc", "drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script figures =", "executable=True, cfg=\"host\"), }, doc = \"Generate an ebook in the", "\"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar =", "ctx.attr.deps: provider = target[EbookInfo] figures += (provider.figures or []) markdowns", "= \"Archiving equations: {}\".format(outdir_tar.short_path), inputs = [outdir], outputs = [outdir_tar],", "for target in ctx.attr.srcs: for src in target.files.to_list(): in_file =", "in ctx.attr.srcs: for src in target.files.to_list(): in_file = src out_file", "output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar],", "command = tar_command, ) # run htexepub to obtain book.epub.", "= \"filipfilmar/ebook-buildenv:1.1\" # Use this for quick local runs. 
#CONTAINER", "_ebook_epub_impl(ctx) # There can be only one such file outputs", "Params: # script_path: (string) The full path to the script", "{mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), )) runfiles", "not ebook_provider: continue deps += ebook_provider.figures runfiles = ctx.runfiles(files =", "= \"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\", \"markdowns\"]) # Returns the docker_run", "in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, src)] figures = [] for", "format\" ) def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider", "= _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"timing diagram to PNG", "# This is gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata", "return [ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, ) ] ebook_kindle =", "+ figures ctx.actions.run_shell( progress_message = \"Building PDF for: {}\".format(name), inputs", "\"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd,", "\"title_yaml\": attr.label( allow_files = True, doc = \"The title.yaml file", "DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png =", "out_file = ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd =", "# There can be only one such file outputs =", "provider.figures dir_reference = markdowns[0] htex_file = ctx.actions.declare_file(\"{}.htex\".format(name)) markdowns_paths = [file.path", "in ctx.attr.deps: ebook_provider = target[EbookInfo] if not ebook_provider: continue deps", "Build rules for building ebooks. 
# This is the container", "\"\"\"\\ {script} \\ {cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path,", "= \"\"\"\\ {script} \\ pandoc -s --gladtex -o {target} {sources}", "equations: {}\".format(outdir_tar.short_path), inputs = [outdir], outputs = [outdir_tar], command =", "return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx): return", "= _neato_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".dot\"],", ") def _strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files):", "-- relative to the directory dir_reference, not the # directory", "}, doc = \"Generate an ebook in PDF format\" )", "src_copy def _copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message =", "attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Generate an ebook", "equation_outdir = outputs[1] equation_outdir_tar = outputs[2] captured_output = ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name))", "attr.label( allow_files = True, doc = \"The epub-metadata.xml file to", "and output directory with figures. 
outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file =", "= [epub_metadata, title_yaml] + markdowns + figures ctx.actions.run_shell( progress_message =", "= _dot_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".dot\"],", "_ebook_pdf_impl, attrs = { \"deps\": attr.label_list( doc = \"All the", "ebook_provider = target[EbookInfo] if not ebook_provider: continue deps += ebook_provider.figures", "] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation =", "a graphviz dot file into png using dot\", ) def", "= [\".asy\"], doc = \"The file to compile\", ), \"deps\":", "target in ctx.attr.srcs: for src in target.files.to_list(): in_file = src", "return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote = rule(implementation", "outputs = [ebook_epub], command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc", "the script to invoke # dir_reference: (string) The path to", "cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving", "; tar xvf {archive}) > {output}\".format( base=equation_outdir_tar.dirname, archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell(", "= ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd = _script_cmd(docker_run.path,", "figures ctx.actions.run_shell( progress_message = \"Building PDF for: {}\".format(name), inputs =", "htex on all *md, gives book.htex markdowns = [] figures", "into png using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd): docker_run =", "target.files.to_list(): in_file = src out_file = ctx.actions.declare_file(in_file.basename + \".png\") figures", "epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) 
ebook_epub =", "ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns),", "Google Inc. # # This file has been licensed under", "executable=True, cfg=\"host\"), }, doc = \"Generate an ebook in PDF", "_ebook_epub_impl, attrs = { \"deps\": attr.label_list( doc = \"All the", "_copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata, title_yaml] +", "runfiles=runfiles), ] drawtiming_png = rule(implementation = _drawtiming_png_impl, attrs = {", "_strip_reference_dir(reference_dir, path): return path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files): return [", "= [] for target in ctx.attr.deps: ebook_provider = target[EbookInfo] if", "graphviz dot file into png using neato\", ) def _dot_png_impl(ctx):", "{}\".format(name), inputs = markdowns, outputs = [htex_file], tools = [script],", "rule(implementation = _dot_png_impl, attrs = { \"srcs\": attr.label_list( allow_files =", "--epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf} {markdowns} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference,", "# the reference directories (build root and repo root). 
def", "The path to a file used for figuring out #", "figures ctx.actions.run_shell( progress_message = \"Building EPUB for: {}\".format(name), inputs =", "ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, )", "_markdown_lib_impl, doc = \"Declares a set of markdown files\", attrs", "-o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps =", "ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs =", "title_yaml] + markdowns + figures ctx.actions.run_shell( progress_message = \"Building PDF", "htex to obtain html and output directory with figures. outdir", "source files\", ), \"deps\": attr.label_list( doc = \"The file to", "full path to the script to invoke # dir_reference: (string)", "providers = [EbookInfo], ), }, ) def _ebook_epub_impl(ctx): name =", "= [file.path for file in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns)", "for this book\", ), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"), },", "in_file.path) ctx.actions.run_shell( progress_message = \"timing diagram to PNG with {1}:", "= runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf", "Maybe this is not needed. tar_command = \"(cd {base} ;", "asymptote = rule(implementation = _asymptote_impl, attrs = { \"srcs\": attr.label_list(", "EbookInfo, second is DefaultInfo. 
(ebook_info, default_info) = _ebook_epub_impl(ctx) # There", "[]) runfiles = ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps: runfiles =", "= [] for dep in ctx.attr.deps: provider = dep[EbookInfo] markdowns", ") deps = [] for target in ctx.attr.deps: ebook_provider =", "progress_message = \"Building PDF for: {}\".format(name), inputs = inputs, tools", "command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ --mathml", "2.0 license. Please see the LICENSE # file at the", "), \"deps\": attr.label_list( doc = \"The file to compile\", providers", "book.htex markdowns = [] figures = [] for dep in", ") def _asymptote_impl(ctx): asycc = ctx.executable._script figures = [] for", "executable=True, cfg=\"host\"), }, doc = \"Transform a graphviz dot file", "= runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures, markdowns=markdowns), DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ),", "= rule(implementation = _neato_png_impl, attrs = { \"srcs\": attr.label_list( allow_files", "archive=equation_outdir_tar.basename, output=captured_output.path) ctx.actions.run_shell( progress_message = \"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs =", "= [ebook_pdf], command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata}", "\"filipfilmar/ebook-buildenv:1.1\" # Use this for quick local runs. 
#CONTAINER =", "= \"All the targets you need to make this book", "dir_reference = markdowns[0] # Fixed up paths -- relative to", "command = \"\"\"\\ {script} --cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file} \\", "[out_file] script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message = \"ASY to", "= \"graphviz to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs =", "progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path), inputs = [outdir], outputs =", "markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, markdowns_paths[0]) ctx.actions.run_shell( progress_message", "inputs, tools = [script], outputs = [ebook_pdf], command = \"\"\"\\", "DefaultInfo( files=depset(figures+markdowns), runfiles=runfiles, ), ] markdown_lib = rule( implementation =", "epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub]) for", "ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir_renamed(ctx, epub_metadata) ebook_epub = ctx.actions.declare_file(\"{}.epub\".format(name)) inputs =", "rule(implementation = _drawtiming_png_impl, attrs = { \"srcs\": attr.label_list( allow_files =", "\"Building EPUB for: {}\".format(name), inputs = inputs, tools = [script],", "\"Building equation environments for: {}\".format(name), inputs = markdowns, outputs =", "--epub-metadata={epub_metadata} \\ -f html -t epub3 -o {ebook_epub} {html_file} \\", "= [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns + figures", "to the script to invoke # dir_reference: (string) The path", "files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, ) ] ebook_epub = rule( implementation", "True, doc = \"The epub-metadata.xml file to use for this", "markdowns[0] # Fixed up paths -- relative to the directory", "with {1}: 
{0}\".format(in_file.short_path, cmd), inputs = [in_file], outputs = [out_file],", "build happens! This is needed because we can not control", "\"neato\") neato_png = rule(implementation = _neato_png_impl, attrs = { \"srcs\":", "{epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference, epub_file.path), mobi_file=_strip_reference_dir(dir_reference, mobi_file.path), ))", "\"graphviz to PNG with {1}: {0}\".format(in_file.short_path, cmd), inputs = [in_file],", "= ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml", "_ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is EbookInfo, second", "ctx.runfiles(files=figures+markdowns) for dep in ctx.attr.deps: runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [", "5 -f png -o \"{out_file}\" \"{in_file}\" \"\"\".format( out_file=out_file.path, in_file=in_file.path, script=script_cmd),", ") return src_copy def _markdown_lib_impl(ctx): markdowns = [] for target", "= ctx.actions.declare_file( \"{}.untar-out\".format(ctx.label.name)) # untar the equation dir # Maybe", "in_file.path) ctx.actions.run_shell( progress_message = \"graphviz to PNG with {1}: {0}\".format(in_file.short_path,", "figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] def _neato_png_impl(ctx):", "[src], command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy def _copy_file_to_workdir(ctx,", "CONTAINER = \"filipfilmar/ebook-buildenv:1.1\" # Use this for quick local runs.", "ctx.attr.srcs: for src in target.files.to_list(): in_file = src out_file =", "tools = [script], outputs = [ebook_epub], command = \"\"\"\\ {script}", "{sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) ) # 
run", "file in files] def _ebook_pdf_impl(ctx): name = ctx.label.name # steps", "rule(implementation = _neato_png_impl, attrs = { \"srcs\": attr.label_list( allow_files =", "def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First provider is EbookInfo,", "{}\".format(name), inputs = [htex_file], outputs = [outdir, html_file], tools =", "_neato_png_impl, attrs = { \"srcs\": attr.label_list( allow_files = [\".dot\"], doc", "outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command", "_script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"graphviz to PNG with {1}:", "= target[EbookInfo] figures += (provider.figures or []) markdowns += (provider.markdowns", "in_file = src out_file = ctx.actions.declare_file(in_file.basename + \".png\") figures +=", "= tar_command, ) dir_reference = epub_file script = ctx.executable._script name", "{script} \\ {cmd} -Tpng -o \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path,", "ebook_pdf = rule( implementation = _ebook_pdf_impl, attrs = { \"deps\":", "\"srcs\": attr.label_list( allow_files = [\".md\"], doc = \"The markdown source", "book work.\", providers = [EbookInfo], ), \"title_yaml\": attr.label( allow_files =", "# untar the equation dir # Maybe this is not", "dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): cmd = \"drawtiming\" docker_run = ctx.executable._script", "= [equation_outdir_tar], outputs = [captured_output], command = tar_command, ) dir_reference", "ctx.actions.declare_file(src.basename) ctx.actions.run_shell( progress_message = \"Copying {}\".format(src.short_path), outputs = [src_copy], inputs", "= src out_file = ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file]", "= [captured_output], command = tar_command, ) 
dir_reference = epub_file script", "runs. #CONTAINER = \"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\", \"markdowns\"]) # Returns", "dep in ctx.attr.deps: provider = dep[EbookInfo] markdowns += provider.markdowns figures", "ctx.actions.declare_file(in_file.basename + \".png\") figures += [out_file] script_cmd = _script_cmd(asycc.path, in_file.path)", "allow_files = [\".md\"], doc = \"The markdown source files\", ),", "on the # script path and its reference directory. #", "using dot\", ) def _asymptote_impl(ctx): asycc = ctx.executable._script figures =", "default=\"//build:docker_run\", executable=True, cfg=\"host\"), }, doc = \"Transform an asymptote file", "progress_message = \"Extracting equations for: {}\".format(name), inputs = [htex_file], outputs", "be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata) title_yaml", "# Copyright (C) 2020 Google Inc. # # This file", "\".join(markdowns_paths)) ) # run gladtex on the resulting htex to", "--cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ --mathml -o {ebook_pdf} {markdowns} \\", "= [src_copy], inputs = [src], command=\"cp {} {}\".format(src.path, src_copy.path), )", "and repo root). 
def _script_cmd(script_path, dir_reference): return \"\"\"\\ {script} \\", "_copy_file_to_workdir(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0] title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf", "docker_run = ctx.executable._script figures = [] for target in ctx.attr.srcs:", "ctx.attr.deps: ebook_provider = target[EbookInfo] if not ebook_provider: continue deps +=", "--cd-to-dir-reference \\ gladtex -r 200 -d {outdir} {htex_file} \\ \"\"\".format(", "script=script_path, container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): cmd = \"drawtiming\" docker_run", "runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ DefaultInfo( files=depset([mobi_file, captured_output]), runfiles=runfiles, )", "= \"The markdown source files\", ), \"deps\": attr.label_list( doc =", "gonna be fun! epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir(ctx, epub_metadata)", "= [outdir_tar], command = tar_command, ) # run htexepub to", "local runs. #CONTAINER = \"ebook-buildenv:local\" EbookInfo = provider(fields=[\"figures\", \"markdowns\"]) #", "= [EbookInfo], ), }, ) def _ebook_epub_impl(ctx): name = ctx.label.name", "script=script_cmd, outdir=_strip_reference_dir(dir_reference, outdir.path), htex_file=_strip_reference_dir(dir_reference, htex_file.path), ) ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename))", "figures. outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name)) ctx.actions.run_shell( progress_message =", "def _copy_file_to_workdir_renamed(ctx, src): src_copy = ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message =", "equation_outdir], tools = [script], outputs = [mobi_file], command = \"\"\"\\", "inputs = [htex_file], outputs = [outdir, html_file], tools = [script],", "2020 Google Inc. 
# # This file has been licensed", "\".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf]) for dep in ctx.attr.deps: runfiles", "or []) markdowns += (provider.markdowns or []) runfiles = ctx.runfiles(files=figures+markdowns)", "title_yaml = _copy_file_to_workdir(ctx, title_yaml) ebook_pdf = ctx.actions.declare_file(\"{}.pdf\".format(name)) inputs = [epub_metadata,", "ctx.actions.run_shell( progress_message = \"Extracting equations for: {}\".format(name), inputs = [htex_file],", "), ] markdown_lib = rule( implementation = _markdown_lib_impl, doc =", "markdowns) script = ctx.executable._script script_cmd = _script_cmd(script.path, dir_reference.path) # run", "png using neato\", ) def _dot_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"dot\") dot_png", "Apache 2.0 license. Please see the LICENSE # file at", "markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png = rule(implementation = _drawtiming_png_impl, attrs", "obtain book.epub. # This is gonna be fun! 
epub_metadata =", "markdown files\", attrs = { \"srcs\": attr.label_list( allow_files = [\".md\"],", "attr.label_list( doc = \"All the targets you need to make", "ebook in PDF format\" ) def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name))", "for: {}\".format(name), inputs = inputs, tools = [script], outputs =", "There can be only one such file outputs = default_info.files.to_list()", "\"Transform a graphviz dot file into png using dot\", )", "[ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote = rule(implementation =", "pandoc --epub-metadata={epub_metadata} \\ -f html -t epub3 -o {ebook_epub} {html_file}", "deps += ebook_provider.figures runfiles = ctx.runfiles(files = figures) return [", "ctx.actions.declare_file(\"{}_{}\".format(ctx.label.name, src.short_path)) ctx.actions.run_shell( progress_message = \"Copying {} to {}\".format(src.short_path, src_copy.short_path),", "Fixed up paths -- relative to the directory dir_reference, not", "DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]), runfiles=runfiles, ) ] ebook_epub = rule(", "dir_reference): return \"\"\"\\ {script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER,", "{script} --cd-to-dir-reference \\ ebook-convert {epub_file} {mobi_file} \\ \"\"\".format( script=script_cmd, epub_file=_strip_reference_dir(dir_reference,", "target in ctx.attr.deps: provider = target[EbookInfo] figures += (provider.figures or", "inputs = inputs, tools = [script], outputs = [ebook_pdf], command", "inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns +", "in _ebook_pdf_impl. 
# steps # run htex on all *md,", "doc = \"The file to compile\", providers = [EbookInfo], ),", "runfiles=runfiles), ] def _neato_png_impl(ctx): return _generalized_graphviz_rule_impl(ctx, \"neato\") neato_png = rule(implementation", "(build root and repo root). def _script_cmd(script_path, dir_reference): return \"\"\"\\", "epub_metadata = ctx.attr.metadata_xml.files.to_list()[0] epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata) title_yaml = ctx.attr.title_yaml.files.to_list()[0]", "output directory with figures. outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file = ctx.actions.declare_file(\"{}.html\".format(name))", "equation dir # Maybe this is not needed. tar_command =", "ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles = ctx.runfiles(files=[ebook_epub]) for dep", "the LICENSE # file at the root of the repository.", ") dir_reference = epub_file script = ctx.executable._script name = ctx.label.name", "# Build rules for building ebooks. # This is the", "path.replace(reference_dir.dirname+\"/\", \"\") def _strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir, file.path) for", "attrs = { \"srcs\": attr.label_list( allow_files = [\".dot\"], doc =", "= \"\"\"\\ {script} \\ asy -render 5 -f png -o", "command = \"\"\"\\ {script} --cd-to-dir-reference \\ pandoc --epub-metadata={epub_metadata} \\ -f", "{dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path),", "not control # figure inclusion. 
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script", "= \"Building PDF for: {}\".format(name), inputs = inputs, tools =", "png using drawtiming\", ) def _generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script", "--container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference, ) def _drawtiming_png_impl(ctx): cmd", "equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs = [captured_output], command =", "providers = [EbookInfo], ), \"title_yaml\": attr.label( allow_files = True, doc", "script invocation command based on the # script path and", "provider is EbookInfo, second is DefaultInfo. (ebook_info, default_info) = _ebook_epub_impl(ctx)", "def _asymptote_impl(ctx): asycc = ctx.executable._script figures = [] for target", "figure inclusion. markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd", "PDF format\" ) def _ebook_kindle_impl(ctx): mobi_file = ctx.actions.declare_file(\"{}.mobi\".format(ctx.label.name)) # First", "<gh_stars>1-10 # Copyright (C) 2020 Google Inc. 
# # This", "executable=True, cfg=\"host\"), }, doc = \"Generate an ebook in EPUB", "script_path: (string) The full path to the script to invoke", "\"Unarchiving equations: {}\".format(equation_outdir_tar.short_path), inputs = [equation_outdir_tar], outputs = [captured_output], command", "; tar cf {archive} {dir})\".format( base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message", "target[EbookInfo] figures += (provider.figures or []) markdowns += (provider.markdowns or", "inputs = inputs, tools = [script], outputs = [ebook_epub], command", "you need to make this book work.\", providers = [EbookInfo],", "used for figuring out # the reference directories (build root", "\"\"\"\\ {script} \\ asy -render 5 -f png -o \"{out_file}\"", "= rule( implementation = _ebook_pdf_impl, attrs = { \"deps\": attr.label_list(", "cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd), ) deps = [] for target", "png using dot\", ) def _asymptote_impl(ctx): asycc = ctx.executable._script figures", ") ) outdir_tar = ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base} ;", "= \"The dependencies, any targets should be allowed\", ), \"output\":", "base=outdir_tar.dirname, archive=outdir_tar.basename, dir=outdir.basename) ctx.actions.run_shell( progress_message = \"Archiving equations: {}\".format(outdir_tar.short_path), inputs", "runfiles.merge(dep[DefaultInfo].data_runfiles) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] asymptote =", "_script_cmd(script.path, dir_reference.path) # run htexepub to obtain book.epub. # This", "root and repo root). 
def _script_cmd(script_path, dir_reference): return \"\"\"\\ {script}", "src)] figures = [] for target in ctx.attr.deps: provider =", "= markdowns[0] # Fixed up paths -- relative to the", "MOBI for: {}\".format(name), inputs = [epub_file, equation_outdir], tools = [script],", "ctx.attr.srcs: for src in target.files.to_list(): markdowns += [_copy_file_to_workdir(ctx, src)] figures", "[outdir_tar], command = tar_command, ) # run htexepub to obtain", "return [ DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf = rule(", "{ebook_epub} {html_file} \\ \"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference,", "in markdowns] markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns) script = ctx.executable._script script_cmd", "def _strip_reference_dir_from_files(reference_dir, files): return [ _strip_reference_dir(reference_dir, file.path) for file in", "This is needed because we can not control # figure", "its reference directory. # # Params: # script_path: (string) The", "-o {target} {sources} \\ \"\"\".format( script=script_cmd, target=htex_file.path, sources=\" \".join(markdowns_paths)) )", "DefaultInfo( files=depset([ebook_pdf]), runfiles=runfiles, ) ] ebook_pdf = rule( implementation =", "to PNG: {0}\".format(in_file.short_path), inputs = [in_file], outputs = [out_file], tools", "runfiles=runfiles, ) ] ebook_kindle = rule( implementation = _ebook_kindle_impl, attrs", "licensed under Apache 2.0 license. Please see the LICENSE #", "= \"Transform an asymptote file into png\", ) def _copy_file_to_workdir_renamed(ctx,", "is needed because we can not control # figure inclusion.", "= rule( implementation = _ebook_kindle_impl, attrs = { \"deps\": attr.label_list(", "root). 
def _script_cmd(script_path, dir_reference): return \"\"\"\\ {script} \\ --container={container} \\", "cmd = \"drawtiming\" docker_run = ctx.executable._script figures = [] for", "command=\"cp {} {}\".format(src.path, src_copy.path), ) return src_copy def _markdown_lib_impl(ctx): markdowns", "to the directory dir_reference, not the # directory where the", "EPUB for: {}\".format(name), inputs = inputs, tools = [script], outputs", "one such file outputs = default_info.files.to_list() epub_file = outputs[0] equation_outdir", "[in_file], outputs = [out_file], tools = [docker_run], command = \"\"\"\\", "use for this book\", ), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, cfg=\"host\"),", "ctx.actions.declare_file(\"{}.tar\".format(outdir.basename)) tar_command = \"(cd {base} ; tar cf {archive} {dir})\".format(", ") def _generalized_graphviz_rule_impl(ctx, cmd): docker_run = ctx.executable._script figures = []", "# file at the root of the repository. # Build", "htexepub to obtain book.epub. # This is gonna be fun!", "file to use for this book\", ), \"metadata_xml\": attr.label( allow_files", "{ \"srcs\": attr.label_list( allow_files = [\".dot\"], doc = \"The file", "needed because we can not control # figure inclusion. 
markdowns_paths", "see the LICENSE # file at the root of the", ") def _ebook_epub_impl(ctx): name = ctx.label.name # This is duplicated", "= \"The title.yaml file to use for this book\", ),", "# script_path: (string) The full path to the script to", "{script} --cd-to-dir-reference \\ gladtex -r 200 -d {outdir} {htex_file} \\", "cfg=\"host\"), }, doc = \"Generate an ebook in PDF format\"", "ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path), markdowns=\" \".join(markdowns_paths), )) runfiles = ctx.runfiles(files=[ebook_pdf]) for dep", "{ \"srcs\": attr.label_list( allow_files = [\".asy\"], doc = \"The file", "+= [out_file] script_cmd = _script_cmd(docker_run.path, in_file.path) ctx.actions.run_shell( progress_message = \"graphviz", "html and output directory with figures. outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name)) html_file", "command = \"\"\"\\ {script} \\ {cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format(", "epub_file.path) ctx.actions.run_shell( progress_message = \"Building MOBI for: {}\".format(name), inputs =", "{script} \\ --container={container} \\ --dir-reference={dir_reference}\"\"\".format( script=script_path, container=CONTAINER, dir_reference=dir_reference, ) def", "\".png\") figures += [out_file] script_cmd = _script_cmd(asycc.path, in_file.path) ctx.actions.run_shell( progress_message", "book\", ), \"metadata_xml\": attr.label( allow_files = True, doc = \"The", "runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles) return [ dep[EbookInfo], DefaultInfo( files=depset([ebook_epub, outdir, outdir_tar]),", "figures = [] for dep in ctx.attr.deps: provider = dep[EbookInfo]", "markdowns = [] for target in ctx.attr.srcs: for src in", "_drawtiming_png_impl(ctx): cmd = \"drawtiming\" docker_run = ctx.executable._script figures = []", "for target in ctx.attr.deps: provider = target[EbookInfo] figures += (provider.figures", "this book\", ), \"_script\": attr.label( default=\"//build:docker_run\", executable=True, 
cfg=\"host\"), }, doc", "= [out_file], tools = [asycc], command = \"\"\"\\ {script} \\", "not ebook_provider: continue deps += ebook_provider.figures runfiles = ctx.runfiles(files=figures+deps) for", "\"\"\".format( script=script_cmd, epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path), ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path), html_file=_strip_reference_dir(dir_reference, html_file.path), )) runfiles", "figures) return [ EbookInfo(figures=figures+deps, markdowns=[]), DefaultInfo(files=depset(figures+deps), runfiles=runfiles), ] drawtiming_png =", "default_info) = _ebook_epub_impl(ctx) # There can be only one such", "gladtex -r 200 -d {outdir} {htex_file} \\ \"\"\".format( script=script_cmd, outdir=_strip_reference_dir(dir_reference,", "= _markdown_lib_impl, doc = \"Declares a set of markdown files\",", "src_copy.path), ) return src_copy def _copy_file_to_workdir(ctx, src): src_copy = ctx.actions.declare_file(src.basename)", "on the resulting htex to obtain html and output directory", "[docker_run], command = \"\"\"\\ {script} \\ {cmd} -Tpng -o \"{out_file}\"", "\\ {cmd} --output \"{out_file}\" \"{in_file}\" \"\"\".format( cmd=cmd, out_file=out_file.path, in_file=in_file.path, script=script_cmd),", "= [] for target in ctx.attr.srcs: for src in target.files.to_list():", "obtain html and output directory with figures. outdir = ctx.actions.declare_directory(\"{}.eqn\".format(name))", "command = tar_command, ) dir_reference = epub_file script = ctx.executable._script" ]